git.proxmox.com - mirror_ubuntu-artful-kernel.git (Ubuntu-4.13.0-45.50) - drivers/mtd/nand/nand_base.c
1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/nand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
52
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
55
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59 {
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
66 if (!section) {
67 oobregion->offset = 0;
68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
76 oobregion->offset = 6;
77 oobregion->length = ecc->total - 4;
78 }
79
80 return 0;
81 }
82
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 struct mtd_oob_region *oobregion)
85 {
86 if (section > 1)
87 return -ERANGE;
88
89 if (mtd->oobsize == 16) {
90 if (section)
91 return -ERANGE;
92
93 oobregion->length = 8;
94 oobregion->offset = 8;
95 } else {
96 oobregion->length = 2;
97 if (!section)
98 oobregion->offset = 3;
99 else
100 oobregion->offset = 6;
101 }
102
103 return 0;
104 }
105
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 .ecc = nand_ooblayout_ecc_sp,
108 .free = nand_ooblayout_free_sp,
109 };
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 struct mtd_oob_region *oobregion)
114 {
115 struct nand_chip *chip = mtd_to_nand(mtd);
116 struct nand_ecc_ctrl *ecc = &chip->ecc;
117
118 if (section)
119 return -ERANGE;
120
121 oobregion->length = ecc->total;
122 oobregion->offset = mtd->oobsize - oobregion->length;
123
124 return 0;
125 }
126
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 struct mtd_oob_region *oobregion)
129 {
130 struct nand_chip *chip = mtd_to_nand(mtd);
131 struct nand_ecc_ctrl *ecc = &chip->ecc;
132
133 if (section)
134 return -ERANGE;
135
136 oobregion->length = mtd->oobsize - ecc->total - 2;
137 oobregion->offset = 2;
138
139 return 0;
140 }
141
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 .ecc = nand_ooblayout_ecc_lp,
144 .free = nand_ooblayout_free_lp,
145 };
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
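
/*
 * Illustrative sketch (not part of the original driver): these layouts are
 * normally consumed through the generic mtd_ooblayout_*() helpers rather than
 * by calling the hooks directly. A hypothetical debug helper could walk the
 * regions of a registered NAND device "mtd" like this:
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, region.offset, region.length);
 *		section++;
 *	}
 *
 * mtd_ooblayout_free() can be iterated the same way until it returns -ERANGE.
 */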
147
148 /*
149  * Support the old "large page" layout used for 1-bit Hamming ECC where the
150  * ECC bytes are placed at a fixed offset.
151 */
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 struct mtd_oob_region *oobregion)
154 {
155 struct nand_chip *chip = mtd_to_nand(mtd);
156 struct nand_ecc_ctrl *ecc = &chip->ecc;
157
158 if (section)
159 return -ERANGE;
160
161 switch (mtd->oobsize) {
162 case 64:
163 oobregion->offset = 40;
164 break;
165 case 128:
166 oobregion->offset = 80;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 oobregion->length = ecc->total;
173 if (oobregion->offset + oobregion->length > mtd->oobsize)
174 return -ERANGE;
175
176 return 0;
177 }
178
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 struct mtd_oob_region *oobregion)
181 {
182 struct nand_chip *chip = mtd_to_nand(mtd);
183 struct nand_ecc_ctrl *ecc = &chip->ecc;
184 int ecc_offset = 0;
185
186 if (section < 0 || section > 1)
187 return -ERANGE;
188
189 switch (mtd->oobsize) {
190 case 64:
191 ecc_offset = 40;
192 break;
193 case 128:
194 ecc_offset = 80;
195 break;
196 default:
197 return -EINVAL;
198 }
199
200 if (section == 0) {
201 oobregion->offset = 2;
202 oobregion->length = ecc_offset - 2;
203 } else {
204 oobregion->offset = ecc_offset + ecc->total;
205 oobregion->length = mtd->oobsize - oobregion->offset;
206 }
207
208 return 0;
209 }
210
211 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 .ecc = nand_ooblayout_ecc_lp_hamming,
213 .free = nand_ooblayout_free_lp_hamming,
214 };
215
216 static int check_offs_len(struct mtd_info *mtd,
217 loff_t ofs, uint64_t len)
218 {
219 struct nand_chip *chip = mtd_to_nand(mtd);
220 int ret = 0;
221
222 /* Start address must align on block boundary */
223 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: unaligned address\n", __func__);
225 ret = -EINVAL;
226 }
227
228 /* Length must align on block boundary */
229 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 pr_debug("%s: length not block aligned\n", __func__);
231 ret = -EINVAL;
232 }
233
234 return ret;
235 }
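
/*
 * Worked example (illustrative only): for a chip with 128KiB erase blocks,
 * phys_erase_shift is 17 and the mask used above is 0x1ffff. An erase request
 * with ofs = 0x20000 and len = 0x40000 passes both checks, while
 * ofs = 0x21000 trips the "unaligned address" branch because
 * 0x21000 & 0x1ffff != 0.
 */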
236
237 /**
238 * nand_release_device - [GENERIC] release chip
239 * @mtd: MTD device structure
240 *
241 * Release chip lock and wake up anyone waiting on the device.
242 */
243 static void nand_release_device(struct mtd_info *mtd)
244 {
245 struct nand_chip *chip = mtd_to_nand(mtd);
246
247 /* Release the controller and the chip */
248 spin_lock(&chip->controller->lock);
249 chip->controller->active = NULL;
250 chip->state = FL_READY;
251 wake_up(&chip->controller->wq);
252 spin_unlock(&chip->controller->lock);
253 }
254
255 /**
256 * nand_read_byte - [DEFAULT] read one byte from the chip
257 * @mtd: MTD device structure
258 *
259 * Default read function for 8bit buswidth
260 */
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
262 {
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 return readb(chip->IO_ADDR_R);
265 }
266
267 /**
268 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
269 * @mtd: MTD device structure
270 *
271 * Default read function for 16bit buswidth with endianness conversion.
272 *
273 */
274 static uint8_t nand_read_byte16(struct mtd_info *mtd)
275 {
276 struct nand_chip *chip = mtd_to_nand(mtd);
277 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
278 }
279
280 /**
281 * nand_read_word - [DEFAULT] read one word from the chip
282 * @mtd: MTD device structure
283 *
284 * Default read function for 16bit buswidth without endianness conversion.
285 */
286 static u16 nand_read_word(struct mtd_info *mtd)
287 {
288 struct nand_chip *chip = mtd_to_nand(mtd);
289 return readw(chip->IO_ADDR_R);
290 }
291
292 /**
293 * nand_select_chip - [DEFAULT] control CE line
294 * @mtd: MTD device structure
295 * @chipnr: chipnumber to select, -1 for deselect
296 *
297 * Default select function for 1 chip devices.
298 */
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300 {
301 struct nand_chip *chip = mtd_to_nand(mtd);
302
303 switch (chipnr) {
304 case -1:
305 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 break;
307 case 0:
308 break;
309
310 default:
311 BUG();
312 }
313 }
314
315 /**
316 * nand_write_byte - [DEFAULT] write single byte to chip
317 * @mtd: MTD device structure
318 * @byte: value to write
319 *
320 * Default function to write a byte to I/O[7:0]
321 */
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323 {
324 struct nand_chip *chip = mtd_to_nand(mtd);
325
326 chip->write_buf(mtd, &byte, 1);
327 }
328
329 /**
330 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331 * @mtd: MTD device structure
332 * @byte: value to write
333 *
334 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
335 */
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337 {
338 struct nand_chip *chip = mtd_to_nand(mtd);
339 uint16_t word = byte;
340
341 /*
342 * It's not entirely clear what should happen to I/O[15:8] when writing
343 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
344 *
345 * When the host supports a 16-bit bus width, only data is
346 * transferred at the 16-bit width. All address and command line
347 * transfers shall use only the lower 8-bits of the data bus. During
348 * command transfers, the host may place any value on the upper
349 * 8-bits of the data bus. During address transfers, the host shall
350 * set the upper 8-bits of the data bus to 00h.
351 *
352 * One user of the write_byte callback is nand_onfi_set_features. The
353 * four parameters are specified to be written to I/O[7:0], but this is
354 * neither an address nor a command transfer. Let's assume a 0 on the
355 * upper I/O lines is OK.
356 */
357 chip->write_buf(mtd, (uint8_t *)&word, 2);
358 }
359
360 /**
361 * nand_write_buf - [DEFAULT] write buffer to chip
362 * @mtd: MTD device structure
363 * @buf: data buffer
364 * @len: number of bytes to write
365 *
366 * Default write function for 8bit buswidth.
367 */
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369 {
370 struct nand_chip *chip = mtd_to_nand(mtd);
371
372 iowrite8_rep(chip->IO_ADDR_W, buf, len);
373 }
374
375 /**
376 * nand_read_buf - [DEFAULT] read chip data into buffer
377 * @mtd: MTD device structure
379  * @buf: buffer to store read data
379 * @len: number of bytes to read
380 *
381 * Default read function for 8bit buswidth.
382 */
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384 {
385 struct nand_chip *chip = mtd_to_nand(mtd);
386
387 ioread8_rep(chip->IO_ADDR_R, buf, len);
388 }
389
390 /**
391 * nand_write_buf16 - [DEFAULT] write buffer to chip
392 * @mtd: MTD device structure
393 * @buf: data buffer
394 * @len: number of bytes to write
395 *
396 * Default write function for 16bit buswidth.
397 */
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399 {
400 struct nand_chip *chip = mtd_to_nand(mtd);
401 u16 *p = (u16 *) buf;
402
403 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404 }
405
406 /**
407 * nand_read_buf16 - [DEFAULT] read chip data into buffer
408 * @mtd: MTD device structure
409  * @buf: buffer to store read data
410 * @len: number of bytes to read
411 *
412 * Default read function for 16bit buswidth.
413 */
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415 {
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 u16 *p = (u16 *) buf;
418
419 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420 }
421
422 /**
423 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
424 * @mtd: MTD device structure
425 * @ofs: offset from device start
426 *
427  * Check if the block is bad.
428 */
429 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
430 {
431 int page, page_end, res;
432 struct nand_chip *chip = mtd_to_nand(mtd);
433 u8 bad;
434
435 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 ofs += mtd->erasesize - mtd->writesize;
437
438 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
440
441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res)
444 return res;
445
446 bad = chip->oob_poi[chip->badblockpos];
447
448 if (likely(chip->badblockbits == 8))
449 res = bad != 0xFF;
450 else
451 res = hweight8(bad) < chip->badblockbits;
452 if (res)
453 return res;
454 }
455
456 return 0;
457 }
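
/*
 * Worked example (illustrative only): with the default badblockbits == 8, any
 * marker byte other than 0xFF flags the block as bad. A driver that sets
 * badblockbits = 7 tolerates a single bitflip in the marker: bad = 0xF7 has
 * hweight8(bad) == 7, which is not below the threshold, so the block is still
 * considered good, while bad = 0x00 clearly marks it bad.
 */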
458
459 /**
460 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
461 * @mtd: MTD device structure
462 * @ofs: offset from device start
463 *
464 * This is the default implementation, which can be overridden by a hardware
465 * specific driver. It provides the details for writing a bad block marker to a
466 * block.
467 */
468 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
469 {
470 struct nand_chip *chip = mtd_to_nand(mtd);
471 struct mtd_oob_ops ops;
472 uint8_t buf[2] = { 0, 0 };
473 int ret = 0, res, i = 0;
474
475 memset(&ops, 0, sizeof(ops));
476 ops.oobbuf = buf;
477 ops.ooboffs = chip->badblockpos;
478 if (chip->options & NAND_BUSWIDTH_16) {
479 ops.ooboffs &= ~0x01;
480 ops.len = ops.ooblen = 2;
481 } else {
482 ops.len = ops.ooblen = 1;
483 }
484 ops.mode = MTD_OPS_PLACE_OOB;
485
486 /* Write to first/last page(s) if necessary */
487 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 ofs += mtd->erasesize - mtd->writesize;
489 do {
490 res = nand_do_write_oob(mtd, ofs, &ops);
491 if (!ret)
492 ret = res;
493
494 i++;
495 ofs += mtd->writesize;
496 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
497
498 return ret;
499 }
500
501 /**
502 * nand_block_markbad_lowlevel - mark a block bad
503 * @mtd: MTD device structure
504 * @ofs: offset from device start
505 *
506 * This function performs the generic NAND bad block marking steps (i.e., bad
507 * block table(s) and/or marker(s)). We only allow the hardware driver to
508 * specify how to write bad block markers to OOB (chip->block_markbad).
509 *
510 * We try operations in the following order:
511 *
512 * (1) erase the affected block, to allow OOB marker to be written cleanly
513 * (2) write bad block marker to OOB area of affected block (unless flag
514 * NAND_BBT_NO_OOB_BBM is present)
515 * (3) update the BBT
516 *
517 * Note that we retain the first error encountered in (2) or (3), finish the
518 * procedures, and dump the error in the end.
519 */
520 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
521 {
522 struct nand_chip *chip = mtd_to_nand(mtd);
523 int res, ret = 0;
524
525 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 struct erase_info einfo;
527
528 /* Attempt erase before marking OOB */
529 memset(&einfo, 0, sizeof(einfo));
530 einfo.mtd = mtd;
531 einfo.addr = ofs;
532 einfo.len = 1ULL << chip->phys_erase_shift;
533 nand_erase_nand(mtd, &einfo, 0);
534
535 /* Write bad block marker to OOB */
536 nand_get_device(mtd, FL_WRITING);
537 ret = chip->block_markbad(mtd, ofs);
538 nand_release_device(mtd);
539 }
540
541 /* Mark block bad in BBT */
542 if (chip->bbt) {
543 res = nand_markbad_bbt(mtd, ofs);
544 if (!ret)
545 ret = res;
546 }
547
548 if (!ret)
549 mtd->ecc_stats.badblocks++;
550
551 return ret;
552 }
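
/*
 * Illustrative usage (not part of the original driver): callers never reach
 * this helper directly; they go through the generic MTD API, e.g. when a
 * filesystem or a tool such as nandwrite gives up on a hypothetical block 42:
 *
 *	loff_t ofs = 42 * mtd->erasesize;
 *
 *	if (mtd_block_markbad(mtd, ofs))
 *		pr_warn("failed to mark block at 0x%llx bad\n",
 *			(unsigned long long)ofs);
 *
 * mtd_block_markbad() ends up in nand_block_markbad(), which in turn calls
 * nand_block_markbad_lowlevel() for blocks not already marked bad.
 */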
553
554 /**
555 * nand_check_wp - [GENERIC] check if the chip is write protected
556 * @mtd: MTD device structure
557 *
558  * Check if the device is write protected. The function expects that the
559  * device is already selected.
560 */
561 static int nand_check_wp(struct mtd_info *mtd)
562 {
563 struct nand_chip *chip = mtd_to_nand(mtd);
564
565 /* Broken xD cards report WP despite being writable */
566 if (chip->options & NAND_BROKEN_XD)
567 return 0;
568
569 /* Check the WP bit */
570 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
571 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
572 }
573
574 /**
575 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
576 * @mtd: MTD device structure
577 * @ofs: offset from device start
578 *
579 * Check if the block is marked as reserved.
580 */
581 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
582 {
583 struct nand_chip *chip = mtd_to_nand(mtd);
584
585 if (!chip->bbt)
586 return 0;
587 /* Return info from the table */
588 return nand_isreserved_bbt(mtd, ofs);
589 }
590
591 /**
592 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
593 * @mtd: MTD device structure
594 * @ofs: offset from device start
595  * @allowbbt: 1 if it is allowed to access the bbt area
596  *
597  * Check if the block is bad, either by reading the bad block table or by
598  * calling the scan function.
599 */
600 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
601 {
602 struct nand_chip *chip = mtd_to_nand(mtd);
603
604 if (!chip->bbt)
605 return chip->block_bad(mtd, ofs);
606
607 /* Return info from the table */
608 return nand_isbad_bbt(mtd, ofs, allowbbt);
609 }
610
611 /**
612 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
613 * @mtd: MTD device structure
614 * @timeo: Timeout
615 *
616 * Helper function for nand_wait_ready used when needing to wait in interrupt
617 * context.
618 */
619 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
620 {
621 struct nand_chip *chip = mtd_to_nand(mtd);
622 int i;
623
624 /* Wait for the device to get ready */
625 for (i = 0; i < timeo; i++) {
626 if (chip->dev_ready(mtd))
627 break;
628 touch_softlockup_watchdog();
629 mdelay(1);
630 }
631 }
632
633 /**
634 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
635 * @mtd: MTD device structure
636 *
637 * Wait for the ready pin after a command, and warn if a timeout occurs.
638 */
639 void nand_wait_ready(struct mtd_info *mtd)
640 {
641 struct nand_chip *chip = mtd_to_nand(mtd);
642 unsigned long timeo = 400;
643
644 if (in_interrupt() || oops_in_progress)
645 return panic_nand_wait_ready(mtd, timeo);
646
647 /* Wait until command is processed or timeout occurs */
648 timeo = jiffies + msecs_to_jiffies(timeo);
649 do {
650 if (chip->dev_ready(mtd))
651 return;
652 cond_resched();
653 } while (time_before(jiffies, timeo));
654
655 if (!chip->dev_ready(mtd))
656 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
657 }
658 EXPORT_SYMBOL_GPL(nand_wait_ready);
659
660 /**
661 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
662 * @mtd: MTD device structure
663 * @timeo: Timeout in ms
664 *
665 * Wait for status ready (i.e. command done) or timeout.
666 */
667 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
668 {
669 register struct nand_chip *chip = mtd_to_nand(mtd);
670
671 timeo = jiffies + msecs_to_jiffies(timeo);
672 do {
673 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
674 break;
675 touch_softlockup_watchdog();
676 } while (time_before(jiffies, timeo));
677 };
678
679 /**
680 * nand_command - [DEFAULT] Send command to NAND device
681 * @mtd: MTD device structure
682 * @command: the command to be sent
683 * @column: the column address for this command, -1 if none
684 * @page_addr: the page address for this command, -1 if none
685 *
686 * Send command to NAND device. This function is used for small page devices
687 * (512 Bytes per page).
688 */
689 static void nand_command(struct mtd_info *mtd, unsigned int command,
690 int column, int page_addr)
691 {
692 register struct nand_chip *chip = mtd_to_nand(mtd);
693 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
694
695 /* Write out the command to the device */
696 if (command == NAND_CMD_SEQIN) {
697 int readcmd;
698
699 if (column >= mtd->writesize) {
700 /* OOB area */
701 column -= mtd->writesize;
702 readcmd = NAND_CMD_READOOB;
703 } else if (column < 256) {
704 /* First 256 bytes --> READ0 */
705 readcmd = NAND_CMD_READ0;
706 } else {
707 column -= 256;
708 readcmd = NAND_CMD_READ1;
709 }
710 chip->cmd_ctrl(mtd, readcmd, ctrl);
711 ctrl &= ~NAND_CTRL_CHANGE;
712 }
713 chip->cmd_ctrl(mtd, command, ctrl);
714
715 /* Address cycle, when necessary */
716 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
717 /* Serially input address */
718 if (column != -1) {
719 /* Adjust columns for 16 bit buswidth */
720 if (chip->options & NAND_BUSWIDTH_16 &&
721 !nand_opcode_8bits(command))
722 column >>= 1;
723 chip->cmd_ctrl(mtd, column, ctrl);
724 ctrl &= ~NAND_CTRL_CHANGE;
725 }
726 if (page_addr != -1) {
727 chip->cmd_ctrl(mtd, page_addr, ctrl);
728 ctrl &= ~NAND_CTRL_CHANGE;
729 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
730 /* One more address cycle for devices > 32MiB */
731 if (chip->chipsize > (32 << 20))
732 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
733 }
734 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
735
736 /*
737  * Program and erase have their own busy handlers; status and sequential
738  * in need no delay.
739 */
740 switch (command) {
741
742 case NAND_CMD_PAGEPROG:
743 case NAND_CMD_ERASE1:
744 case NAND_CMD_ERASE2:
745 case NAND_CMD_SEQIN:
746 case NAND_CMD_STATUS:
747 case NAND_CMD_READID:
748 case NAND_CMD_SET_FEATURES:
749 return;
750
751 case NAND_CMD_RESET:
752 if (chip->dev_ready)
753 break;
754 udelay(chip->chip_delay);
755 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
756 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
757 chip->cmd_ctrl(mtd,
758 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
759  /* EZ-NAND can take up to 250 ms as per ONFI v4.0 */
760 nand_wait_status_ready(mtd, 250);
761 return;
762
763 /* This applies to read commands */
764 case NAND_CMD_READ0:
765 /*
766 * READ0 is sometimes used to exit GET STATUS mode. When this
767 * is the case no address cycles are requested, and we can use
768 * this information to detect that we should not wait for the
769 * device to be ready.
770 */
771 if (column == -1 && page_addr == -1)
772 return;
773
774 default:
775 /*
776 * If we don't have access to the busy pin, we apply the given
777 * command delay
778 */
779 if (!chip->dev_ready) {
780 udelay(chip->chip_delay);
781 return;
782 }
783 }
784 /*
785 * Apply this short delay always to ensure that we do wait tWB in
786 * any case on any machine.
787 */
788 ndelay(100);
789
790 nand_wait_ready(mtd);
791 }
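
/*
 * Worked example (illustrative only): on a 512-byte page, 64MiB chip,
 * chip->cmdfunc(mtd, NAND_CMD_READOOB, 5, 100) results in roughly this bus
 * sequence:
 *
 *	CLE: 0x50 (READOOB)
 *	ALE: 0x05			(column 5 within the OOB area)
 *	ALE: 100 & 0xff, 100 >> 8	(two page-address cycles)
 *	ALE: 100 >> 16			(third cycle, since the chip is > 32MiB)
 *
 * followed by the short tWB delay and nand_wait_ready().
 */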
792
793 static void nand_ccs_delay(struct nand_chip *chip)
794 {
795 /*
796 * The controller already takes care of waiting for tCCS when the RNDIN
797 * or RNDOUT command is sent, return directly.
798 */
799 if (!(chip->options & NAND_WAIT_TCCS))
800 return;
801
802 /*
803 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
804 * (which should be safe for all NANDs).
805 */
806 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
807 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
808 else
809 ndelay(500);
810 }
811
812 /**
813 * nand_command_lp - [DEFAULT] Send command to NAND large page device
814 * @mtd: MTD device structure
815 * @command: the command to be sent
816 * @column: the column address for this command, -1 if none
817 * @page_addr: the page address for this command, -1 if none
818 *
819 * Send command to NAND device. This is the version for the new large page
820 * devices. We don't have the separate regions as we have in the small page
821 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
822 */
823 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
824 int column, int page_addr)
825 {
826 register struct nand_chip *chip = mtd_to_nand(mtd);
827
828 /* Emulate NAND_CMD_READOOB */
829 if (command == NAND_CMD_READOOB) {
830 column += mtd->writesize;
831 command = NAND_CMD_READ0;
832 }
833
834 /* Command latch cycle */
835 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
836
837 if (column != -1 || page_addr != -1) {
838 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
839
840 /* Serially input address */
841 if (column != -1) {
842 /* Adjust columns for 16 bit buswidth */
843 if (chip->options & NAND_BUSWIDTH_16 &&
844 !nand_opcode_8bits(command))
845 column >>= 1;
846 chip->cmd_ctrl(mtd, column, ctrl);
847 ctrl &= ~NAND_CTRL_CHANGE;
848
849 /* Only output a single addr cycle for 8bits opcodes. */
850 if (!nand_opcode_8bits(command))
851 chip->cmd_ctrl(mtd, column >> 8, ctrl);
852 }
853 if (page_addr != -1) {
854 chip->cmd_ctrl(mtd, page_addr, ctrl);
855 chip->cmd_ctrl(mtd, page_addr >> 8,
856 NAND_NCE | NAND_ALE);
857 /* One more address cycle for devices > 128MiB */
858 if (chip->chipsize > (128 << 20))
859 chip->cmd_ctrl(mtd, page_addr >> 16,
860 NAND_NCE | NAND_ALE);
861 }
862 }
863 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
864
865 /*
866  * Program and erase have their own busy handlers; status and sequential
867  * in need no delay.
868 */
869 switch (command) {
870
871 case NAND_CMD_CACHEDPROG:
872 case NAND_CMD_PAGEPROG:
873 case NAND_CMD_ERASE1:
874 case NAND_CMD_ERASE2:
875 case NAND_CMD_SEQIN:
876 case NAND_CMD_STATUS:
877 case NAND_CMD_READID:
878 case NAND_CMD_SET_FEATURES:
879 return;
880
881 case NAND_CMD_RNDIN:
882 nand_ccs_delay(chip);
883 return;
884
885 case NAND_CMD_RESET:
886 if (chip->dev_ready)
887 break;
888 udelay(chip->chip_delay);
889 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
890 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
891 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
892 NAND_NCE | NAND_CTRL_CHANGE);
893  /* EZ-NAND can take up to 250 ms as per ONFI v4.0 */
894 nand_wait_status_ready(mtd, 250);
895 return;
896
897 case NAND_CMD_RNDOUT:
898 /* No ready / busy check necessary */
899 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
900 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
901 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
902 NAND_NCE | NAND_CTRL_CHANGE);
903
904 nand_ccs_delay(chip);
905 return;
906
907 case NAND_CMD_READ0:
908 /*
909 * READ0 is sometimes used to exit GET STATUS mode. When this
910 * is the case no address cycles are requested, and we can use
911 * this information to detect that READSTART should not be
912 * issued.
913 */
914 if (column == -1 && page_addr == -1)
915 return;
916
917 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
918 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
919 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
920 NAND_NCE | NAND_CTRL_CHANGE);
921
922 /* This applies to read commands */
923 default:
924 /*
925 * If we don't have access to the busy pin, we apply the given
926 * command delay.
927 */
928 if (!chip->dev_ready) {
929 udelay(chip->chip_delay);
930 return;
931 }
932 }
933
934 /*
935 * Apply this short delay always to ensure that we do wait tWB in
936 * any case on any machine.
937 */
938 ndelay(100);
939
940 nand_wait_ready(mtd);
941 }
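
/*
 * Worked example (illustrative only): large page chips have no separate
 * READOOB command, so chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page) on a
 * 2048-byte page device is rewritten above into READ0 with column 2048,
 * i.e. the read simply starts at the first OOB byte:
 *
 *	CLE: 0x00 (READ0), ALE: column (2 cycles), ALE: page (2 or 3 cycles),
 *	CLE: 0x30 (READSTART), tWB delay, nand_wait_ready()
 */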
942
943 /**
944 * panic_nand_get_device - [GENERIC] Get chip for selected access
945 * @chip: the nand chip descriptor
946 * @mtd: MTD device structure
947 * @new_state: the state which is requested
948 *
949 * Used when in panic, no locks are taken.
950 */
951 static void panic_nand_get_device(struct nand_chip *chip,
952 struct mtd_info *mtd, int new_state)
953 {
954 /* Hardware controller shared among independent devices */
955 chip->controller->active = chip;
956 chip->state = new_state;
957 }
958
959 /**
960 * nand_get_device - [GENERIC] Get chip for selected access
961 * @mtd: MTD device structure
962 * @new_state: the state which is requested
963 *
964 * Get the device and lock it for exclusive access
965 */
966 static int
967 nand_get_device(struct mtd_info *mtd, int new_state)
968 {
969 struct nand_chip *chip = mtd_to_nand(mtd);
970 spinlock_t *lock = &chip->controller->lock;
971 wait_queue_head_t *wq = &chip->controller->wq;
972 DECLARE_WAITQUEUE(wait, current);
973 retry:
974 spin_lock(lock);
975
976 /* Hardware controller shared among independent devices */
977 if (!chip->controller->active)
978 chip->controller->active = chip;
979
980 if (chip->controller->active == chip && chip->state == FL_READY) {
981 chip->state = new_state;
982 spin_unlock(lock);
983 return 0;
984 }
985 if (new_state == FL_PM_SUSPENDED) {
986 if (chip->controller->active->state == FL_PM_SUSPENDED) {
987 chip->state = FL_PM_SUSPENDED;
988 spin_unlock(lock);
989 return 0;
990 }
991 }
992 set_current_state(TASK_UNINTERRUPTIBLE);
993 add_wait_queue(wq, &wait);
994 spin_unlock(lock);
995 schedule();
996 remove_wait_queue(wq, &wait);
997 goto retry;
998 }
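
/*
 * Illustrative sketch (not part of the original driver): every externally
 * visible operation in this file brackets its work with this pair, e.g.:
 *
 *	nand_get_device(mtd, FL_READING);
 *	... issue commands and transfer data ...
 *	nand_release_device(mtd);
 *
 * so that independent chips sharing one controller serialize on
 * controller->lock and controller->wq instead of rolling their own locking.
 */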
999
1000 /**
1001 * panic_nand_wait - [GENERIC] wait until the command is done
1002 * @mtd: MTD device structure
1003 * @chip: NAND chip structure
1004 * @timeo: timeout
1005 *
1006 * Wait for command done. This is a helper function for nand_wait used when
1007 * we are in interrupt context. May happen when in panic and trying to write
1008 * an oops through mtdoops.
1009 */
1010 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1011 unsigned long timeo)
1012 {
1013 int i;
1014 for (i = 0; i < timeo; i++) {
1015 if (chip->dev_ready) {
1016 if (chip->dev_ready(mtd))
1017 break;
1018 } else {
1019 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1020 break;
1021 }
1022 mdelay(1);
1023 }
1024 }
1025
1026 /**
1027 * nand_wait - [DEFAULT] wait until the command is done
1028 * @mtd: MTD device structure
1029 * @chip: NAND chip structure
1030 *
1031 * Wait for command done. This applies to erase and program only.
1032 */
1033 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1034 {
1035
1036 int status;
1037 unsigned long timeo = 400;
1038
1039 /*
1040 * Apply this short delay always to ensure that we do wait tWB in any
1041 * case on any machine.
1042 */
1043 ndelay(100);
1044
1045 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1046
1047 if (in_interrupt() || oops_in_progress)
1048 panic_nand_wait(mtd, chip, timeo);
1049 else {
1050 timeo = jiffies + msecs_to_jiffies(timeo);
1051 do {
1052 if (chip->dev_ready) {
1053 if (chip->dev_ready(mtd))
1054 break;
1055 } else {
1056 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1057 break;
1058 }
1059 cond_resched();
1060 } while (time_before(jiffies, timeo));
1061 }
1062
1063 status = (int)chip->read_byte(mtd);
1064  /* This can happen in case of a timeout or a buggy dev_ready */
1065 WARN_ON(!(status & NAND_STATUS_READY));
1066 return status;
1067 }
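
/*
 * Illustrative sketch (not part of the original driver): callers of the
 * waitfunc hook interpret the returned status byte themselves, typically in
 * the same way as the erase/program/unlock paths in this file:
 *
 *	status = chip->waitfunc(mtd, chip);
 *	if (status & NAND_STATUS_FAIL)
 *		return -EIO;
 *
 * NAND_STATUS_READY only indicates that the chip has left its busy state; it
 * says nothing about whether the operation actually succeeded.
 */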
1068
1069 /**
1070 * nand_reset_data_interface - Reset data interface and timings
1071 * @chip: The NAND chip
1072 * @chipnr: Internal die id
1073 *
1074 * Reset the Data interface and timings to ONFI mode 0.
1075 *
1076 * Returns 0 for success or negative error code otherwise.
1077 */
1078 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1079 {
1080 struct mtd_info *mtd = nand_to_mtd(chip);
1081 const struct nand_data_interface *conf;
1082 int ret;
1083
1084 if (!chip->setup_data_interface)
1085 return 0;
1086
1087 /*
1088 * The ONFI specification says:
1089 * "
1090 * To transition from NV-DDR or NV-DDR2 to the SDR data
1091 * interface, the host shall use the Reset (FFh) command
1092 * using SDR timing mode 0. A device in any timing mode is
1093 * required to recognize Reset (FFh) command issued in SDR
1094 * timing mode 0.
1095 * "
1096 *
1097 * Configure the data interface in SDR mode and set the
1098 * timings to timing mode 0.
1099 */
1100
1101 conf = nand_get_default_data_interface();
1102 ret = chip->setup_data_interface(mtd, chipnr, conf);
1103 if (ret)
1104 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1105
1106 return ret;
1107 }
1108
1109 /**
1110 * nand_setup_data_interface - Setup the best data interface and timings
1111 * @chip: The NAND chip
1112 * @chipnr: Internal die id
1113 *
1114 * Find and configure the best data interface and NAND timings supported by
1115 * the chip and the driver.
1116 * First tries to retrieve supported timing modes from ONFI information,
1117 * and if the NAND chip does not support ONFI, relies on the
1118 * ->onfi_timing_mode_default specified in the nand_ids table.
1119 *
1120 * Returns 0 for success or negative error code otherwise.
1121 */
1122 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1123 {
1124 struct mtd_info *mtd = nand_to_mtd(chip);
1125 int ret;
1126
1127 if (!chip->setup_data_interface || !chip->data_interface)
1128 return 0;
1129
1130 /*
1131 * Ensure the timing mode has been changed on the chip side
1132 * before changing timings on the controller side.
1133 */
1134 if (chip->onfi_version &&
1135 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1136 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1137 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1138 chip->onfi_timing_mode_default,
1139 };
1140
1141 ret = chip->onfi_set_features(mtd, chip,
1142 ONFI_FEATURE_ADDR_TIMING_MODE,
1143 tmode_param);
1144 if (ret)
1145 goto err;
1146 }
1147
1148 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1149 err:
1150 return ret;
1151 }
1152
1153 /**
1154 * nand_init_data_interface - find the best data interface and timings
1155 * @chip: The NAND chip
1156 *
1157 * Find the best data interface and NAND timings supported by the chip
1158 * and the driver.
1159 * First tries to retrieve supported timing modes from ONFI information,
1160 * and if the NAND chip does not support ONFI, relies on the
1161 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1162 * function nand_chip->data_interface is initialized with the best timing mode
1163 * available.
1164 *
1165 * Returns 0 for success or negative error code otherwise.
1166 */
1167 static int nand_init_data_interface(struct nand_chip *chip)
1168 {
1169 struct mtd_info *mtd = nand_to_mtd(chip);
1170 int modes, mode, ret;
1171
1172 if (!chip->setup_data_interface)
1173 return 0;
1174
1175 /*
1176 * First try to identify the best timings from ONFI parameters and
1177 * if the NAND does not support ONFI, fallback to the default ONFI
1178 * timing mode.
1179 */
1180 modes = onfi_get_async_timing_mode(chip);
1181 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1182 if (!chip->onfi_timing_mode_default)
1183 return 0;
1184
1185 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1186 }
1187
1188 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1189 GFP_KERNEL);
1190 if (!chip->data_interface)
1191 return -ENOMEM;
1192
1193 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1194 ret = onfi_init_data_interface(chip, chip->data_interface,
1195 NAND_SDR_IFACE, mode);
1196 if (ret)
1197 continue;
1198
1199  /* Pass NAND_DATA_IFACE_CHECK_ONLY to only check whether the controller supports these timings */
1200 ret = chip->setup_data_interface(mtd,
1201 NAND_DATA_IFACE_CHECK_ONLY,
1202 chip->data_interface);
1203 if (!ret) {
1204 chip->onfi_timing_mode_default = mode;
1205 break;
1206 }
1207 }
1208
1209 return 0;
1210 }
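
/*
 * Worked example (illustrative only): if onfi_get_async_timing_mode() reports
 * that modes 0-3 are supported, modes == 0x0f, so the loop above starts at
 * fls(0x0f) - 1 == 3, i.e. the fastest advertised mode, and walks downwards
 * until setup_data_interface() accepts one in NAND_DATA_IFACE_CHECK_ONLY mode.
 */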
1211
1212 static void nand_release_data_interface(struct nand_chip *chip)
1213 {
1214 kfree(chip->data_interface);
1215 }
1216
1217 /**
1218 * nand_reset - Reset and initialize a NAND device
1219 * @chip: The NAND chip
1220 * @chipnr: Internal die id
1221 *
1222 * Returns 0 for success or negative error code otherwise
1223 */
1224 int nand_reset(struct nand_chip *chip, int chipnr)
1225 {
1226 struct mtd_info *mtd = nand_to_mtd(chip);
1227 int ret;
1228
1229 ret = nand_reset_data_interface(chip, chipnr);
1230 if (ret)
1231 return ret;
1232
1233 /*
1234 * The CS line has to be released before we can apply the new NAND
1235 * interface settings, hence this weird ->select_chip() dance.
1236 */
1237 chip->select_chip(mtd, chipnr);
1238 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1239 chip->select_chip(mtd, -1);
1240
1241 chip->select_chip(mtd, chipnr);
1242 ret = nand_setup_data_interface(chip, chipnr);
1243 chip->select_chip(mtd, -1);
1244 if (ret)
1245 return ret;
1246
1247 return 0;
1248 }
1249
1250 /**
1251 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1252 * @mtd: mtd info
1253 * @ofs: offset to start unlock from
1254 * @len: length to unlock
1255 * @invert:
1256 * - when = 0, unlock the range of blocks within the lower and
1257 * upper boundary address
1258 * - when = 1, unlock the range of blocks outside the boundaries
1259 * of the lower and upper boundary address
1260 *
1261  * Returns unlock status.
1262 */
1263 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1264 uint64_t len, int invert)
1265 {
1266 int ret = 0;
1267 int status, page;
1268 struct nand_chip *chip = mtd_to_nand(mtd);
1269
1270 /* Submit address of first page to unlock */
1271 page = ofs >> chip->page_shift;
1272 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1273
1274 /* Submit address of last page to unlock */
1275 page = (ofs + len) >> chip->page_shift;
1276 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1277 (page | invert) & chip->pagemask);
1278
1279 /* Call wait ready function */
1280 status = chip->waitfunc(mtd, chip);
1281 /* See if device thinks it succeeded */
1282 if (status & NAND_STATUS_FAIL) {
1283 pr_debug("%s: error status = 0x%08x\n",
1284 __func__, status);
1285 ret = -EIO;
1286 }
1287
1288 return ret;
1289 }
1290
1291 /**
1292 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1293 * @mtd: mtd info
1294 * @ofs: offset to start unlock from
1295 * @len: length to unlock
1296 *
1297 * Returns unlock status.
1298 */
1299 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1300 {
1301 int ret = 0;
1302 int chipnr;
1303 struct nand_chip *chip = mtd_to_nand(mtd);
1304
1305 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1306 __func__, (unsigned long long)ofs, len);
1307
1308 if (check_offs_len(mtd, ofs, len))
1309 return -EINVAL;
1310
1311 /* Align to last block address if size addresses end of the device */
1312 if (ofs + len == mtd->size)
1313 len -= mtd->erasesize;
1314
1315 nand_get_device(mtd, FL_UNLOCKING);
1316
1317 /* Shift to get chip number */
1318 chipnr = ofs >> chip->chip_shift;
1319
1320 /*
1321 * Reset the chip.
1322  * If we want to check the WP bit (bit 7) through READ STATUS, we must
1323  * reset the chip first, because some operations (e.g. erasing or
1324  * programming a locked block) can also clear bit 7 of the status
1325  * register.
1326 */
1327 nand_reset(chip, chipnr);
1328
1329 chip->select_chip(mtd, chipnr);
1330
1331 /* Check, if it is write protected */
1332 if (nand_check_wp(mtd)) {
1333 pr_debug("%s: device is write protected!\n",
1334 __func__);
1335 ret = -EIO;
1336 goto out;
1337 }
1338
1339 ret = __nand_unlock(mtd, ofs, len, 0);
1340
1341 out:
1342 chip->select_chip(mtd, -1);
1343 nand_release_device(mtd);
1344
1345 return ret;
1346 }
1347 EXPORT_SYMBOL(nand_unlock);
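
/*
 * Illustrative usage (hypothetical, not part of the original driver): a board
 * driver whose chip powers up locked could unlock the whole device right
 * after probing it:
 *
 *	ret = nand_unlock(mtd, 0, mtd->size);
 *	if (ret)
 *		pr_warn("NAND unlock failed: %d\n", ret);
 *
 * Note the "ofs + len == mtd->size" special case above, which trims len by one
 * erase block before issuing the UNLOCK1/UNLOCK2 pair.
 */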
1348
1349 /**
1350 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1351 * @mtd: mtd info
1352  * @ofs: offset to start lock from
1353  * @len: length to lock
1354 *
1355  * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1356  * have this feature, but it only allows locking all blocks, not a specified
1357  * range of blocks. For now, the 'lock' feature is implemented by making use
1358  * of 'unlock'.
1359 *
1360 * Returns lock status.
1361 */
1362 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1363 {
1364 int ret = 0;
1365 int chipnr, status, page;
1366 struct nand_chip *chip = mtd_to_nand(mtd);
1367
1368 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1369 __func__, (unsigned long long)ofs, len);
1370
1371 if (check_offs_len(mtd, ofs, len))
1372 return -EINVAL;
1373
1374 nand_get_device(mtd, FL_LOCKING);
1375
1376 /* Shift to get chip number */
1377 chipnr = ofs >> chip->chip_shift;
1378
1379 /*
1380 * Reset the chip.
1381  * If we want to check the WP bit (bit 7) through READ STATUS, we must
1382  * reset the chip first, because some operations (e.g. erasing or
1383  * programming a locked block) can also clear bit 7 of the status
1384  * register.
1385 */
1386 nand_reset(chip, chipnr);
1387
1388 chip->select_chip(mtd, chipnr);
1389
1390 /* Check, if it is write protected */
1391 if (nand_check_wp(mtd)) {
1392 pr_debug("%s: device is write protected!\n",
1393 __func__);
1394 status = MTD_ERASE_FAILED;
1395 ret = -EIO;
1396 goto out;
1397 }
1398
1399 /* Submit address of first page to lock */
1400 page = ofs >> chip->page_shift;
1401 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1402
1403 /* Call wait ready function */
1404 status = chip->waitfunc(mtd, chip);
1405 /* See if device thinks it succeeded */
1406 if (status & NAND_STATUS_FAIL) {
1407 pr_debug("%s: error status = 0x%08x\n",
1408 __func__, status);
1409 ret = -EIO;
1410 goto out;
1411 }
1412
1413 ret = __nand_unlock(mtd, ofs, len, 0x1);
1414
1415 out:
1416 chip->select_chip(mtd, -1);
1417 nand_release_device(mtd);
1418
1419 return ret;
1420 }
1421 EXPORT_SYMBOL(nand_lock);
1422
1423 /**
1424 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1425 * @buf: buffer to test
1426 * @len: buffer length
1427 * @bitflips_threshold: maximum number of bitflips
1428 *
1429 * Check if a buffer contains only 0xff, which means the underlying region
1430 * has been erased and is ready to be programmed.
1431  * The bitflips_threshold specifies the maximum number of bitflips before
1432  * considering the region not erased.
1433  * Note: The logic of this function has been extracted from the memweight
1434  * implementation, except that nand_check_erased_buf exits before testing
1435  * the whole buffer if the number of bitflips exceeds the
1436  * bitflips_threshold value.
1437 *
1438 * Returns a positive number of bitflips less than or equal to
1439 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1440 * threshold.
1441 */
1442 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1443 {
1444 const unsigned char *bitmap = buf;
1445 int bitflips = 0;
1446 int weight;
1447
1448 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1449 len--, bitmap++) {
1450 weight = hweight8(*bitmap);
1451 bitflips += BITS_PER_BYTE - weight;
1452 if (unlikely(bitflips > bitflips_threshold))
1453 return -EBADMSG;
1454 }
1455
1456 for (; len >= sizeof(long);
1457 len -= sizeof(long), bitmap += sizeof(long)) {
1458 unsigned long d = *((unsigned long *)bitmap);
1459 if (d == ~0UL)
1460 continue;
1461 weight = hweight_long(d);
1462 bitflips += BITS_PER_LONG - weight;
1463 if (unlikely(bitflips > bitflips_threshold))
1464 return -EBADMSG;
1465 }
1466
1467 for (; len > 0; len--, bitmap++) {
1468 weight = hweight8(*bitmap);
1469 bitflips += BITS_PER_BYTE - weight;
1470 if (unlikely(bitflips > bitflips_threshold))
1471 return -EBADMSG;
1472 }
1473
1474 return bitflips;
1475 }
1476
1477 /**
1478 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1479 * 0xff data
1480 * @data: data buffer to test
1481 * @datalen: data length
1482 * @ecc: ECC buffer
1483 * @ecclen: ECC length
1484 * @extraoob: extra OOB buffer
1485 * @extraooblen: extra OOB length
1486 * @bitflips_threshold: maximum number of bitflips
1487 *
1488 * Check if a data buffer and its associated ECC and OOB data contains only
1489 * 0xff pattern, which means the underlying region has been erased and is
1490 * ready to be programmed.
1491  * The bitflips_threshold specifies the maximum number of bitflips before
1492  * considering the region as not erased.
1493 *
1494 * Note:
1495  * 1/ ECC algorithms work on pre-defined block sizes which are usually
1496  * different from the NAND page size. When fixing bitflips, ECC engines will
1497  * report the number of errors per chunk, and the NAND core infrastructure
1498  * expects you to return the maximum number of bitflips for the whole page.
1499 * This is why you should always use this function on a single chunk and
1500 * not on the whole page. After checking each chunk you should update your
1501 * max_bitflips value accordingly.
1502 * 2/ When checking for bitflips in erased pages you should not only check
1503  * the payload data but also their associated ECC data, because a user might
1504  * have programmed almost all bits to 1 except a few. In this case, we
1505  * shouldn't consider the chunk as erased, and checking the ECC bytes
1506  * prevents this case.
1507 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1508 * data are protected by the ECC engine.
1509 * It could also be used if you support subpages and want to attach some
1510 * extra OOB data to an ECC chunk.
1511 *
1512 * Returns a positive number of bitflips less than or equal to
1513 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1514 * threshold. In case of success, the passed buffers are filled with 0xff.
1515 */
1516 int nand_check_erased_ecc_chunk(void *data, int datalen,
1517 void *ecc, int ecclen,
1518 void *extraoob, int extraooblen,
1519 int bitflips_threshold)
1520 {
1521 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1522
1523 data_bitflips = nand_check_erased_buf(data, datalen,
1524 bitflips_threshold);
1525 if (data_bitflips < 0)
1526 return data_bitflips;
1527
1528 bitflips_threshold -= data_bitflips;
1529
1530 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1531 if (ecc_bitflips < 0)
1532 return ecc_bitflips;
1533
1534 bitflips_threshold -= ecc_bitflips;
1535
1536 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1537 bitflips_threshold);
1538 if (extraoob_bitflips < 0)
1539 return extraoob_bitflips;
1540
1541 if (data_bitflips)
1542 memset(data, 0xff, datalen);
1543
1544 if (ecc_bitflips)
1545 memset(ecc, 0xff, ecclen);
1546
1547 if (extraoob_bitflips)
1548 memset(extraoob, 0xff, extraooblen);
1549
1550 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1551 }
1552 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
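
/*
 * Illustrative sketch (not part of the original driver): a controller driver
 * with its own read_page() implementation typically calls this helper when
 * its ECC engine reports an uncorrectable chunk, mirroring what
 * nand_read_page_hwecc() does below:
 *
 *	stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
 *	if (stat == -EBADMSG)
 *		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *						   &ecc_code[i], chip->ecc.bytes,
 *						   NULL, 0, chip->ecc.strength);
 *	if (stat < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 */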
1553
1554 /**
1555 * nand_read_page_raw - [INTERN] read raw page data without ecc
1556 * @mtd: mtd info structure
1557 * @chip: nand chip info structure
1558 * @buf: buffer to store read data
1559 * @oob_required: caller requires OOB data read to chip->oob_poi
1560 * @page: page number to read
1561 *
1562 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1563 */
1564 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1565 uint8_t *buf, int oob_required, int page)
1566 {
1567 chip->read_buf(mtd, buf, mtd->writesize);
1568 if (oob_required)
1569 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1570 return 0;
1571 }
1572 EXPORT_SYMBOL(nand_read_page_raw);
1573
1574 /**
1575 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1576 * @mtd: mtd info structure
1577 * @chip: nand chip info structure
1578 * @buf: buffer to store read data
1579 * @oob_required: caller requires OOB data read to chip->oob_poi
1580 * @page: page number to read
1581 *
1582 * We need a special oob layout and handling even when OOB isn't used.
1583 */
1584 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1585 struct nand_chip *chip, uint8_t *buf,
1586 int oob_required, int page)
1587 {
1588 int eccsize = chip->ecc.size;
1589 int eccbytes = chip->ecc.bytes;
1590 uint8_t *oob = chip->oob_poi;
1591 int steps, size;
1592
1593 for (steps = chip->ecc.steps; steps > 0; steps--) {
1594 chip->read_buf(mtd, buf, eccsize);
1595 buf += eccsize;
1596
1597 if (chip->ecc.prepad) {
1598 chip->read_buf(mtd, oob, chip->ecc.prepad);
1599 oob += chip->ecc.prepad;
1600 }
1601
1602 chip->read_buf(mtd, oob, eccbytes);
1603 oob += eccbytes;
1604
1605 if (chip->ecc.postpad) {
1606 chip->read_buf(mtd, oob, chip->ecc.postpad);
1607 oob += chip->ecc.postpad;
1608 }
1609 }
1610
1611 size = mtd->oobsize - (oob - chip->oob_poi);
1612 if (size)
1613 chip->read_buf(mtd, oob, size);
1614
1615 return 0;
1616 }
1617
1618 /**
1619 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1620 * @mtd: mtd info structure
1621 * @chip: nand chip info structure
1622 * @buf: buffer to store read data
1623 * @oob_required: caller requires OOB data read to chip->oob_poi
1624 * @page: page number to read
1625 */
1626 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1627 uint8_t *buf, int oob_required, int page)
1628 {
1629 int i, eccsize = chip->ecc.size, ret;
1630 int eccbytes = chip->ecc.bytes;
1631 int eccsteps = chip->ecc.steps;
1632 uint8_t *p = buf;
1633 uint8_t *ecc_calc = chip->buffers->ecccalc;
1634 uint8_t *ecc_code = chip->buffers->ecccode;
1635 unsigned int max_bitflips = 0;
1636
1637 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1638
1639 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1640 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1641
1642 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1643 chip->ecc.total);
1644 if (ret)
1645 return ret;
1646
1647 eccsteps = chip->ecc.steps;
1648 p = buf;
1649
1650 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1651 int stat;
1652
1653 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1654 if (stat < 0) {
1655 mtd->ecc_stats.failed++;
1656 } else {
1657 mtd->ecc_stats.corrected += stat;
1658 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1659 }
1660 }
1661 return max_bitflips;
1662 }
1663
1664 /**
1665 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1666 * @mtd: mtd info structure
1667 * @chip: nand chip info structure
1668 * @data_offs: offset of requested data within the page
1669 * @readlen: data length
1670 * @bufpoi: buffer to store read data
1671 * @page: page number to read
1672 */
1673 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1674 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1675 int page)
1676 {
1677 int start_step, end_step, num_steps, ret;
1678 uint8_t *p;
1679 int data_col_addr, i, gaps = 0;
1680 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1681 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1682 int index, section = 0;
1683 unsigned int max_bitflips = 0;
1684 struct mtd_oob_region oobregion = { };
1685
1686  /* Column address within the page aligned to ECC size (256 bytes) */
1687 start_step = data_offs / chip->ecc.size;
1688 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1689 num_steps = end_step - start_step + 1;
1690 index = start_step * chip->ecc.bytes;
1691
1692 /* Data size aligned to ECC ecc.size */
1693 datafrag_len = num_steps * chip->ecc.size;
1694 eccfrag_len = num_steps * chip->ecc.bytes;
1695
1696 data_col_addr = start_step * chip->ecc.size;
1697  /* If we are not reading page-aligned data */
1698 if (data_col_addr != 0)
1699 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1700
1701 p = bufpoi + data_col_addr;
1702 chip->read_buf(mtd, p, datafrag_len);
1703
1704 /* Calculate ECC */
1705 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1706 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1707
1708 /*
1709  * Performance is better if we position the read offsets according to the
1710  * ECC layout. Let's make sure that there are no gaps in the ECC positions.
1711 */
1712 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1713 if (ret)
1714 return ret;
1715
1716 if (oobregion.length < eccfrag_len)
1717 gaps = 1;
1718
1719 if (gaps) {
1720 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1721 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1722 } else {
1723 /*
1724  * Send the command to read the particular ECC bytes, taking care
1725  * of buswidth alignment in read_buf.
1726 */
1727 aligned_pos = oobregion.offset & ~(busw - 1);
1728 aligned_len = eccfrag_len;
1729 if (oobregion.offset & (busw - 1))
1730 aligned_len++;
1731 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1732 (busw - 1))
1733 aligned_len++;
1734
1735 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1736 mtd->writesize + aligned_pos, -1);
1737 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1738 }
1739
1740 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1741 chip->oob_poi, index, eccfrag_len);
1742 if (ret)
1743 return ret;
1744
1745 p = bufpoi + data_col_addr;
1746 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1747 int stat;
1748
1749 stat = chip->ecc.correct(mtd, p,
1750 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1751 if (stat == -EBADMSG &&
1752 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1753 /* check for empty pages with bitflips */
1754 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1755 &chip->buffers->ecccode[i],
1756 chip->ecc.bytes,
1757 NULL, 0,
1758 chip->ecc.strength);
1759 }
1760
1761 if (stat < 0) {
1762 mtd->ecc_stats.failed++;
1763 } else {
1764 mtd->ecc_stats.corrected += stat;
1765 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1766 }
1767 }
1768 return max_bitflips;
1769 }
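
/*
 * Worked example (illustrative only): with chip->ecc.size == 512 and
 * chip->ecc.bytes == 7, a request for data_offs = 600, readlen = 1000 gives
 * start_step = 1, end_step = (600 + 1000 - 1) / 512 = 3 and num_steps = 3,
 * so 1536 data bytes are read starting at column 512, and 21 ECC bytes are
 * fetched from the OOB area starting at index = 7.
 */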
1770
1771 /**
1772 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1773 * @mtd: mtd info structure
1774 * @chip: nand chip info structure
1775 * @buf: buffer to store read data
1776 * @oob_required: caller requires OOB data read to chip->oob_poi
1777 * @page: page number to read
1778 *
1779 * Not for syndrome calculating ECC controllers which need a special oob layout.
1780 */
1781 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1782 uint8_t *buf, int oob_required, int page)
1783 {
1784 int i, eccsize = chip->ecc.size, ret;
1785 int eccbytes = chip->ecc.bytes;
1786 int eccsteps = chip->ecc.steps;
1787 uint8_t *p = buf;
1788 uint8_t *ecc_calc = chip->buffers->ecccalc;
1789 uint8_t *ecc_code = chip->buffers->ecccode;
1790 unsigned int max_bitflips = 0;
1791
1792 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1793 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1794 chip->read_buf(mtd, p, eccsize);
1795 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1796 }
1797 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1798
1799 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1800 chip->ecc.total);
1801 if (ret)
1802 return ret;
1803
1804 eccsteps = chip->ecc.steps;
1805 p = buf;
1806
1807 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1808 int stat;
1809
1810 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1811 if (stat == -EBADMSG &&
1812 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1813 /* check for empty pages with bitflips */
1814 stat = nand_check_erased_ecc_chunk(p, eccsize,
1815 &ecc_code[i], eccbytes,
1816 NULL, 0,
1817 chip->ecc.strength);
1818 }
1819
1820 if (stat < 0) {
1821 mtd->ecc_stats.failed++;
1822 } else {
1823 mtd->ecc_stats.corrected += stat;
1824 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1825 }
1826 }
1827 return max_bitflips;
1828 }
1829
1830 /**
1831 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1832 * @mtd: mtd info structure
1833 * @chip: nand chip info structure
1834 * @buf: buffer to store read data
1835 * @oob_required: caller requires OOB data read to chip->oob_poi
1836 * @page: page number to read
1837 *
1838  * Hardware ECC for large page chips which requires the OOB to be read first.
1839  * For this ECC mode, the write_page method is re-used from ECC_HW. These
1840  * methods read/write ECC from the OOB area, unlike ECC_HW_SYNDROME, which
1841  * uses multiple ECC steps, follows the "infix ECC" scheme, and reads/writes
1842  * ECC from the data area, overwriting the NAND manufacturer's bad block markings.
1843 */
1844 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1845 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1846 {
1847 int i, eccsize = chip->ecc.size, ret;
1848 int eccbytes = chip->ecc.bytes;
1849 int eccsteps = chip->ecc.steps;
1850 uint8_t *p = buf;
1851 uint8_t *ecc_code = chip->buffers->ecccode;
1852 uint8_t *ecc_calc = chip->buffers->ecccalc;
1853 unsigned int max_bitflips = 0;
1854
1855 /* Read the OOB area first */
1856 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1857 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1858 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1859
1860 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1861 chip->ecc.total);
1862 if (ret)
1863 return ret;
1864
1865 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1866 int stat;
1867
1868 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1869 chip->read_buf(mtd, p, eccsize);
1870 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1871
1872 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1873 if (stat == -EBADMSG &&
1874 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1875 /* check for empty pages with bitflips */
1876 stat = nand_check_erased_ecc_chunk(p, eccsize,
1877 &ecc_code[i], eccbytes,
1878 NULL, 0,
1879 chip->ecc.strength);
1880 }
1881
1882 if (stat < 0) {
1883 mtd->ecc_stats.failed++;
1884 } else {
1885 mtd->ecc_stats.corrected += stat;
1886 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1887 }
1888 }
1889 return max_bitflips;
1890 }
1891
1892 /**
1893 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1894 * @mtd: mtd info structure
1895 * @chip: nand chip info structure
1896 * @buf: buffer to store read data
1897 * @oob_required: caller requires OOB data read to chip->oob_poi
1898 * @page: page number to read
1899 *
1900 * The hw generator calculates the error syndrome automatically. Therefore we
1901 * need a special oob layout and handling.
1902 */
1903 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1904 uint8_t *buf, int oob_required, int page)
1905 {
1906 int i, eccsize = chip->ecc.size;
1907 int eccbytes = chip->ecc.bytes;
1908 int eccsteps = chip->ecc.steps;
1909 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1910 uint8_t *p = buf;
1911 uint8_t *oob = chip->oob_poi;
1912 unsigned int max_bitflips = 0;
1913
1914 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1915 int stat;
1916
1917 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1918 chip->read_buf(mtd, p, eccsize);
1919
1920 if (chip->ecc.prepad) {
1921 chip->read_buf(mtd, oob, chip->ecc.prepad);
1922 oob += chip->ecc.prepad;
1923 }
1924
1925 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1926 chip->read_buf(mtd, oob, eccbytes);
1927 stat = chip->ecc.correct(mtd, p, oob, NULL);
1928
1929 oob += eccbytes;
1930
1931 if (chip->ecc.postpad) {
1932 chip->read_buf(mtd, oob, chip->ecc.postpad);
1933 oob += chip->ecc.postpad;
1934 }
1935
1936 if (stat == -EBADMSG &&
1937 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1938 /* check for empty pages with bitflips */
1939 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1940 oob - eccpadbytes,
1941 eccpadbytes,
1942 NULL, 0,
1943 chip->ecc.strength);
1944 }
1945
1946 if (stat < 0) {
1947 mtd->ecc_stats.failed++;
1948 } else {
1949 mtd->ecc_stats.corrected += stat;
1950 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1951 }
1952 }
1953
1954 /* Calculate remaining oob bytes */
1955 i = mtd->oobsize - (oob - chip->oob_poi);
1956 if (i)
1957 chip->read_buf(mtd, oob, i);
1958
1959 return max_bitflips;
1960 }
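
/*
 * For reference, the on-flash layout consumed above is, per ECC step:
 *	data[ecc.size] | prepad | ecc[ecc.bytes] | postpad
 * repeated ecc.steps times, followed by the remaining free OOB bytes.
 */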
1961
1962 /**
1963 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1964 * @mtd: mtd info structure
1965 * @oob: oob destination address
1966 * @ops: oob ops structure
1967 * @len: size of oob to transfer
1968 */
1969 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1970 struct mtd_oob_ops *ops, size_t len)
1971 {
1972 struct nand_chip *chip = mtd_to_nand(mtd);
1973 int ret;
1974
1975 switch (ops->mode) {
1976
1977 case MTD_OPS_PLACE_OOB:
1978 case MTD_OPS_RAW:
1979 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1980 return oob + len;
1981
1982 case MTD_OPS_AUTO_OOB:
1983 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1984 ops->ooboffs, len);
1985 BUG_ON(ret);
1986 return oob + len;
1987
1988 default:
1989 BUG();
1990 }
1991 return NULL;
1992 }
1993
1994 /**
1995 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1996 * @mtd: MTD device structure
1997 * @retry_mode: the retry mode to use
1998 *
1999 * Some vendors supply a special command to shift the Vt threshold, to be used
2000 * when there are too many bitflips in a page (i.e., ECC error). After setting
2001 * a new threshold, the host should retry reading the page.
2002 */
2003 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
2004 {
2005 struct nand_chip *chip = mtd_to_nand(mtd);
2006
2007 pr_debug("setting READ RETRY mode %d\n", retry_mode);
2008
2009 if (retry_mode >= chip->read_retries)
2010 return -EINVAL;
2011
2012 if (!chip->setup_read_retry)
2013 return -EOPNOTSUPP;
2014
2015 return chip->setup_read_retry(mtd, retry_mode);
2016 }
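
/*
 * Illustration only (not part of this driver): a chip->setup_read_retry
 * hook typically programs the retry mode through ONFI SET FEATURES, e.g.
 *
 *	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { retry_mode, };
 *	return chip->onfi_set_features(mtd, chip, vendor_feature_addr, feature);
 *
 * where vendor_feature_addr is the chip-specific read-retry feature
 * address (hypothetical name here; 0x89 on Micron parts, for instance).
 */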
2017
2018 /**
2019 * nand_do_read_ops - [INTERN] Read data with ECC
2020 * @mtd: MTD device structure
2021 * @from: offset to read from
2022 * @ops: oob ops structure
2023 *
2024 * Internal function. Called with chip held.
2025 */
2026 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
2027 struct mtd_oob_ops *ops)
2028 {
2029 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2030 struct nand_chip *chip = mtd_to_nand(mtd);
2031 int ret = 0;
2032 uint32_t readlen = ops->len;
2033 uint32_t oobreadlen = ops->ooblen;
2034 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2035
2036 uint8_t *bufpoi, *oob, *buf;
2037 int use_bufpoi;
2038 unsigned int max_bitflips = 0;
2039 int retry_mode = 0;
2040 bool ecc_fail = false;
2041
2042 chipnr = (int)(from >> chip->chip_shift);
2043 chip->select_chip(mtd, chipnr);
2044
2045 realpage = (int)(from >> chip->page_shift);
2046 page = realpage & chip->pagemask;
2047
2048 col = (int)(from & (mtd->writesize - 1));
2049
2050 buf = ops->datbuf;
2051 oob = ops->oobbuf;
2052 oob_required = oob ? 1 : 0;
2053
2054 while (1) {
2055 unsigned int ecc_failures = mtd->ecc_stats.failed;
2056
2057 bytes = min(mtd->writesize - col, readlen);
2058 aligned = (bytes == mtd->writesize);
2059
2060 if (!aligned)
2061 use_bufpoi = 1;
2062 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2063 use_bufpoi = !virt_addr_valid(buf) ||
2064 !IS_ALIGNED((unsigned long)buf,
2065 chip->buf_align);
2066 else
2067 use_bufpoi = 0;
2068
2069 /* Is the current page in the buffer? */
2070 if (realpage != chip->pagebuf || oob) {
2071 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
2072
2073 if (use_bufpoi && aligned)
2074 pr_debug("%s: using read bounce buffer for buf@%p\n",
2075 __func__, buf);
2076
2077 read_retry:
2078 if (nand_standard_page_accessors(&chip->ecc))
2079 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
2080
2081 /*
2082 * Now read the page into the buffer. Absent an error,
2083 * the read methods return max bitflips per ecc step.
2084 */
2085 if (unlikely(ops->mode == MTD_OPS_RAW))
2086 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
2087 oob_required,
2088 page);
2089 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
2090 !oob)
2091 ret = chip->ecc.read_subpage(mtd, chip,
2092 col, bytes, bufpoi,
2093 page);
2094 else
2095 ret = chip->ecc.read_page(mtd, chip, bufpoi,
2096 oob_required, page);
2097 if (ret < 0) {
2098 if (use_bufpoi)
2099 /* Invalidate page cache */
2100 chip->pagebuf = -1;
2101 break;
2102 }
2103
2104 /* Transfer unaligned data */
2105 if (use_bufpoi) {
2106 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2107 !(mtd->ecc_stats.failed - ecc_failures) &&
2108 (ops->mode != MTD_OPS_RAW)) {
2109 chip->pagebuf = realpage;
2110 chip->pagebuf_bitflips = ret;
2111 } else {
2112 /* Invalidate page cache */
2113 chip->pagebuf = -1;
2114 }
2115 memcpy(buf, chip->buffers->databuf + col, bytes);
2116 }
2117
2118 if (unlikely(oob)) {
2119 int toread = min(oobreadlen, max_oobsize);
2120
2121 if (toread) {
2122 oob = nand_transfer_oob(mtd,
2123 oob, ops, toread);
2124 oobreadlen -= toread;
2125 }
2126 }
2127
2128 if (chip->options & NAND_NEED_READRDY) {
2129 /* Apply delay or wait for ready/busy pin */
2130 if (!chip->dev_ready)
2131 udelay(chip->chip_delay);
2132 else
2133 nand_wait_ready(mtd);
2134 }
2135
2136 if (mtd->ecc_stats.failed - ecc_failures) {
2137 if (retry_mode + 1 < chip->read_retries) {
2138 retry_mode++;
2139 ret = nand_setup_read_retry(mtd,
2140 retry_mode);
2141 if (ret < 0)
2142 break;
2143
2144 /* Reset failures; retry */
2145 mtd->ecc_stats.failed = ecc_failures;
2146 goto read_retry;
2147 } else {
2148 /* No more retry modes; real failure */
2149 ecc_fail = true;
2150 }
2151 }
2152
2153 buf += bytes;
2154 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2155 } else {
2156 memcpy(buf, chip->buffers->databuf + col, bytes);
2157 buf += bytes;
2158 max_bitflips = max_t(unsigned int, max_bitflips,
2159 chip->pagebuf_bitflips);
2160 }
2161
2162 readlen -= bytes;
2163
2164 /* Reset to retry mode 0 */
2165 if (retry_mode) {
2166 ret = nand_setup_read_retry(mtd, 0);
2167 if (ret < 0)
2168 break;
2169 retry_mode = 0;
2170 }
2171
2172 if (!readlen)
2173 break;
2174
2175 /* For subsequent reads align to page boundary */
2176 col = 0;
2177 /* Increment page address */
2178 realpage++;
2179
2180 page = realpage & chip->pagemask;
2181 /* Check, if we cross a chip boundary */
2182 if (!page) {
2183 chipnr++;
2184 chip->select_chip(mtd, -1);
2185 chip->select_chip(mtd, chipnr);
2186 }
2187 }
2188 chip->select_chip(mtd, -1);
2189
2190 ops->retlen = ops->len - (size_t) readlen;
2191 if (oob)
2192 ops->oobretlen = ops->ooblen - oobreadlen;
2193
2194 if (ret < 0)
2195 return ret;
2196
2197 if (ecc_fail)
2198 return -EBADMSG;
2199
2200 return max_bitflips;
2201 }
2202
2203 /**
2204 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2205 * @mtd: MTD device structure
2206 * @from: offset to read from
2207 * @len: number of bytes to read
2208 * @retlen: pointer to variable to store the number of read bytes
2209 * @buf: the databuffer to put data
2210 *
2211 * Get hold of the chip and call nand_do_read_ops().
2212 */
2213 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2214 size_t *retlen, uint8_t *buf)
2215 {
2216 struct mtd_oob_ops ops;
2217 int ret;
2218
2219 nand_get_device(mtd, FL_READING);
2220 memset(&ops, 0, sizeof(ops));
2221 ops.len = len;
2222 ops.datbuf = buf;
2223 ops.mode = MTD_OPS_PLACE_OOB;
2224 ret = nand_do_read_ops(mtd, from, &ops);
2225 *retlen = ops.retlen;
2226 nand_release_device(mtd);
2227 return ret;
2228 }
2229
2230 /**
2231 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2232 * @mtd: mtd info structure
2233 * @chip: nand chip info structure
2234 * @page: page number to read
2235 */
2236 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2237 {
2238 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2239 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2240 return 0;
2241 }
2242 EXPORT_SYMBOL(nand_read_oob_std);
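
/*
 * Note: when a controller driver does not supply its own OOB accessor,
 * nand_scan_tail() wires chip->ecc.read_oob up to this helper by default,
 * so a plain chip->ecc.read_oob(mtd, chip, page) ends up here for most
 * drivers.
 */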
2243
2244 /**
2245 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2246 * with syndromes
2247 * @mtd: mtd info structure
2248 * @chip: nand chip info structure
2249 * @page: page number to read
2250 */
2251 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2252 int page)
2253 {
2254 int length = mtd->oobsize;
2255 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2256 int eccsize = chip->ecc.size;
2257 uint8_t *bufpoi = chip->oob_poi;
2258 int i, toread, sndrnd = 0, pos;
2259
2260 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2261 for (i = 0; i < chip->ecc.steps; i++) {
2262 if (sndrnd) {
2263 pos = eccsize + i * (eccsize + chunk);
2264 if (mtd->writesize > 512)
2265 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2266 else
2267 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2268 } else
2269 sndrnd = 1;
2270 toread = min_t(int, length, chunk);
2271 chip->read_buf(mtd, bufpoi, toread);
2272 bufpoi += toread;
2273 length -= toread;
2274 }
2275 if (length > 0)
2276 chip->read_buf(mtd, bufpoi, length);
2277
2278 return 0;
2279 }
2280 EXPORT_SYMBOL(nand_read_oob_syndrome);
2281
2282 /**
2283 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2284 * @mtd: mtd info structure
2285 * @chip: nand chip info structure
2286 * @page: page number to write
2287 */
2288 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2289 {
2290 int status = 0;
2291 const uint8_t *buf = chip->oob_poi;
2292 int length = mtd->oobsize;
2293
2294 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2295 chip->write_buf(mtd, buf, length);
2296 /* Send command to program the OOB data */
2297 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2298
2299 status = chip->waitfunc(mtd, chip);
2300
2301 return status & NAND_STATUS_FAIL ? -EIO : 0;
2302 }
2303 EXPORT_SYMBOL(nand_write_oob_std);
2304
2305 /**
2306 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2307 * with syndrome - only for large page flash
2308 * @mtd: mtd info structure
2309 * @chip: nand chip info structure
2310 * @page: page number to write
2311 */
2312 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2313 int page)
2314 {
2315 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2316 int eccsize = chip->ecc.size, length = mtd->oobsize;
2317 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2318 const uint8_t *bufpoi = chip->oob_poi;
2319
2320 /*
2321 * data-ecc-data-ecc ... ecc-oob
2322 * or
2323 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2324 */
2325 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2326 pos = steps * (eccsize + chunk);
2327 steps = 0;
2328 } else
2329 pos = eccsize;
2330
2331 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2332 for (i = 0; i < steps; i++) {
2333 if (sndcmd) {
2334 if (mtd->writesize <= 512) {
2335 uint32_t fill = 0xFFFFFFFF;
2336
2337 len = eccsize;
2338 while (len > 0) {
2339 int num = min_t(int, len, 4);
2340 chip->write_buf(mtd, (uint8_t *)&fill,
2341 num);
2342 len -= num;
2343 }
2344 } else {
2345 pos = eccsize + i * (eccsize + chunk);
2346 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2347 }
2348 } else
2349 sndcmd = 1;
2350 len = min_t(int, length, chunk);
2351 chip->write_buf(mtd, bufpoi, len);
2352 bufpoi += len;
2353 length -= len;
2354 }
2355 if (length > 0)
2356 chip->write_buf(mtd, bufpoi, length);
2357
2358 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2359 status = chip->waitfunc(mtd, chip);
2360
2361 return status & NAND_STATUS_FAIL ? -EIO : 0;
2362 }
2363 EXPORT_SYMBOL(nand_write_oob_syndrome);
2364
2365 /**
2366 * nand_do_read_oob - [INTERN] NAND read out-of-band
2367 * @mtd: MTD device structure
2368 * @from: offset to read from
2369 * @ops: oob operations description structure
2370 *
2371 * NAND read out-of-band data from the spare area.
2372 */
2373 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2374 struct mtd_oob_ops *ops)
2375 {
2376 int page, realpage, chipnr;
2377 struct nand_chip *chip = mtd_to_nand(mtd);
2378 struct mtd_ecc_stats stats;
2379 int readlen = ops->ooblen;
2380 int len;
2381 uint8_t *buf = ops->oobbuf;
2382 int ret = 0;
2383
2384 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2385 __func__, (unsigned long long)from, readlen);
2386
2387 stats = mtd->ecc_stats;
2388
2389 len = mtd_oobavail(mtd, ops);
2390
2391 if (unlikely(ops->ooboffs >= len)) {
2392 pr_debug("%s: attempt to start read outside oob\n",
2393 __func__);
2394 return -EINVAL;
2395 }
2396
2397 /* Do not allow reads past end of device */
2398 if (unlikely(from >= mtd->size ||
2399 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2400 (from >> chip->page_shift)) * len)) {
2401 pr_debug("%s: attempt to read beyond end of device\n",
2402 __func__);
2403 return -EINVAL;
2404 }
2405
2406 chipnr = (int)(from >> chip->chip_shift);
2407 chip->select_chip(mtd, chipnr);
2408
2409 /* Shift to get page */
2410 realpage = (int)(from >> chip->page_shift);
2411 page = realpage & chip->pagemask;
2412
2413 while (1) {
2414 if (ops->mode == MTD_OPS_RAW)
2415 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2416 else
2417 ret = chip->ecc.read_oob(mtd, chip, page);
2418
2419 if (ret < 0)
2420 break;
2421
2422 len = min(len, readlen);
2423 buf = nand_transfer_oob(mtd, buf, ops, len);
2424
2425 if (chip->options & NAND_NEED_READRDY) {
2426 /* Apply delay or wait for ready/busy pin */
2427 if (!chip->dev_ready)
2428 udelay(chip->chip_delay);
2429 else
2430 nand_wait_ready(mtd);
2431 }
2432
2433 readlen -= len;
2434 if (!readlen)
2435 break;
2436
2437 /* Increment page address */
2438 realpage++;
2439
2440 page = realpage & chip->pagemask;
2441 /* Check, if we cross a chip boundary */
2442 if (!page) {
2443 chipnr++;
2444 chip->select_chip(mtd, -1);
2445 chip->select_chip(mtd, chipnr);
2446 }
2447 }
2448 chip->select_chip(mtd, -1);
2449
2450 ops->oobretlen = ops->ooblen - readlen;
2451
2452 if (ret < 0)
2453 return ret;
2454
2455 if (mtd->ecc_stats.failed - stats.failed)
2456 return -EBADMSG;
2457
2458 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2459 }
2460
2461 /**
2462 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2463 * @mtd: MTD device structure
2464 * @from: offset to read from
2465 * @ops: oob operation description structure
2466 *
2467 * NAND read data and/or out-of-band data.
2468 */
2469 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2470 struct mtd_oob_ops *ops)
2471 {
2472 int ret;
2473
2474 ops->retlen = 0;
2475
2476 /* Do not allow reads past end of device */
2477 if (ops->datbuf && (from + ops->len) > mtd->size) {
2478 pr_debug("%s: attempt to read beyond end of device\n",
2479 __func__);
2480 return -EINVAL;
2481 }
2482
2483 if (ops->mode != MTD_OPS_PLACE_OOB &&
2484 ops->mode != MTD_OPS_AUTO_OOB &&
2485 ops->mode != MTD_OPS_RAW)
2486 return -ENOTSUPP;
2487
2488 nand_get_device(mtd, FL_READING);
2489
2490 if (!ops->datbuf)
2491 ret = nand_do_read_oob(mtd, from, ops);
2492 else
2493 ret = nand_do_read_ops(mtd, from, ops);
2494
2495 nand_release_device(mtd);
2496 return ret;
2497 }
2498
2499
2500 /**
2501 * nand_write_page_raw - [INTERN] raw page write function
2502 * @mtd: mtd info structure
2503 * @chip: nand chip info structure
2504 * @buf: data buffer
2505 * @oob_required: must write chip->oob_poi to OOB
2506 * @page: page number to write
2507 *
2508 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2509 */
2510 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2511 const uint8_t *buf, int oob_required, int page)
2512 {
2513 chip->write_buf(mtd, buf, mtd->writesize);
2514 if (oob_required)
2515 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2516
2517 return 0;
2518 }
2519 EXPORT_SYMBOL(nand_write_page_raw);
2520
2521 /**
2522 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2523 * @mtd: mtd info structure
2524 * @chip: nand chip info structure
2525 * @buf: data buffer
2526 * @oob_required: must write chip->oob_poi to OOB
2527 * @page: page number to write
2528 *
2529 * We need a special oob layout and handling even when ECC isn't checked.
2530 */
2531 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2532 struct nand_chip *chip,
2533 const uint8_t *buf, int oob_required,
2534 int page)
2535 {
2536 int eccsize = chip->ecc.size;
2537 int eccbytes = chip->ecc.bytes;
2538 uint8_t *oob = chip->oob_poi;
2539 int steps, size;
2540
2541 for (steps = chip->ecc.steps; steps > 0; steps--) {
2542 chip->write_buf(mtd, buf, eccsize);
2543 buf += eccsize;
2544
2545 if (chip->ecc.prepad) {
2546 chip->write_buf(mtd, oob, chip->ecc.prepad);
2547 oob += chip->ecc.prepad;
2548 }
2549
2550 chip->write_buf(mtd, oob, eccbytes);
2551 oob += eccbytes;
2552
2553 if (chip->ecc.postpad) {
2554 chip->write_buf(mtd, oob, chip->ecc.postpad);
2555 oob += chip->ecc.postpad;
2556 }
2557 }
2558
2559 size = mtd->oobsize - (oob - chip->oob_poi);
2560 if (size)
2561 chip->write_buf(mtd, oob, size);
2562
2563 return 0;
2564 }
2565 /**
2566 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2567 * @mtd: mtd info structure
2568 * @chip: nand chip info structure
2569 * @buf: data buffer
2570 * @oob_required: must write chip->oob_poi to OOB
2571 * @page: page number to write
2572 */
2573 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2574 const uint8_t *buf, int oob_required,
2575 int page)
2576 {
2577 int i, eccsize = chip->ecc.size, ret;
2578 int eccbytes = chip->ecc.bytes;
2579 int eccsteps = chip->ecc.steps;
2580 uint8_t *ecc_calc = chip->buffers->ecccalc;
2581 const uint8_t *p = buf;
2582
2583 /* Software ECC calculation */
2584 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2585 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2586
2587 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2588 chip->ecc.total);
2589 if (ret)
2590 return ret;
2591
2592 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2593 }
2594
2595 /**
2596 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2597 * @mtd: mtd info structure
2598 * @chip: nand chip info structure
2599 * @buf: data buffer
2600 * @oob_required: must write chip->oob_poi to OOB
2601 * @page: page number to write
2602 */
2603 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2604 const uint8_t *buf, int oob_required,
2605 int page)
2606 {
2607 int i, eccsize = chip->ecc.size, ret;
2608 int eccbytes = chip->ecc.bytes;
2609 int eccsteps = chip->ecc.steps;
2610 uint8_t *ecc_calc = chip->buffers->ecccalc;
2611 const uint8_t *p = buf;
2612
2613 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2614 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2615 chip->write_buf(mtd, p, eccsize);
2616 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2617 }
2618
2619 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2620 chip->ecc.total);
2621 if (ret)
2622 return ret;
2623
2624 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2625
2626 return 0;
2627 }
2628
2629
2630 /**
2631 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2632 * @mtd: mtd info structure
2633 * @chip: nand chip info structure
2634 * @offset: column address of subpage within the page
2635 * @data_len: data length
2636 * @buf: data buffer
2637 * @oob_required: must write chip->oob_poi to OOB
2638 * @page: page number to write
2639 */
2640 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2641 struct nand_chip *chip, uint32_t offset,
2642 uint32_t data_len, const uint8_t *buf,
2643 int oob_required, int page)
2644 {
2645 uint8_t *oob_buf = chip->oob_poi;
2646 uint8_t *ecc_calc = chip->buffers->ecccalc;
2647 int ecc_size = chip->ecc.size;
2648 int ecc_bytes = chip->ecc.bytes;
2649 int ecc_steps = chip->ecc.steps;
2650 uint32_t start_step = offset / ecc_size;
2651 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2652 int oob_bytes = mtd->oobsize / ecc_steps;
2653 int step, ret;
2654
2655 for (step = 0; step < ecc_steps; step++) {
2656 /* configure controller for WRITE access */
2657 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2658
2659 /* write data (untouched subpages already masked by 0xFF) */
2660 chip->write_buf(mtd, buf, ecc_size);
2661
2662 /* mask ECC of un-touched subpages by padding 0xFF */
2663 if ((step < start_step) || (step > end_step))
2664 memset(ecc_calc, 0xff, ecc_bytes);
2665 else
2666 chip->ecc.calculate(mtd, buf, ecc_calc);
2667
2668 /* mask OOB of un-touched subpages by padding 0xFF */
2669 /* if oob_required, preserve OOB metadata of written subpage */
2670 if (!oob_required || (step < start_step) || (step > end_step))
2671 memset(oob_buf, 0xff, oob_bytes);
2672
2673 buf += ecc_size;
2674 ecc_calc += ecc_bytes;
2675 oob_buf += oob_bytes;
2676 }
2677
2678 /* copy the calculated ECC for the whole page into chip->oob_poi */
2679 /* this includes the masked value (0xFF) for unwritten subpages */
2680 ecc_calc = chip->buffers->ecccalc;
2681 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2682 chip->ecc.total);
2683 if (ret)
2684 return ret;
2685
2686 /* write OOB buffer to NAND device */
2687 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2688
2689 return 0;
2690 }
2691
2692
2693 /**
2694 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2695 * @mtd: mtd info structure
2696 * @chip: nand chip info structure
2697 * @buf: data buffer
2698 * @oob_required: must write chip->oob_poi to OOB
2699 * @page: page number to write
2700 *
2701 * The hw generator calculates the error syndrome automatically. Therefore we
2702 * need a special oob layout and handling.
2703 */
2704 static int nand_write_page_syndrome(struct mtd_info *mtd,
2705 struct nand_chip *chip,
2706 const uint8_t *buf, int oob_required,
2707 int page)
2708 {
2709 int i, eccsize = chip->ecc.size;
2710 int eccbytes = chip->ecc.bytes;
2711 int eccsteps = chip->ecc.steps;
2712 const uint8_t *p = buf;
2713 uint8_t *oob = chip->oob_poi;
2714
2715 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2716
2717 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2718 chip->write_buf(mtd, p, eccsize);
2719
2720 if (chip->ecc.prepad) {
2721 chip->write_buf(mtd, oob, chip->ecc.prepad);
2722 oob += chip->ecc.prepad;
2723 }
2724
2725 chip->ecc.calculate(mtd, p, oob);
2726 chip->write_buf(mtd, oob, eccbytes);
2727 oob += eccbytes;
2728
2729 if (chip->ecc.postpad) {
2730 chip->write_buf(mtd, oob, chip->ecc.postpad);
2731 oob += chip->ecc.postpad;
2732 }
2733 }
2734
2735 /* Calculate remaining oob bytes */
2736 i = mtd->oobsize - (oob - chip->oob_poi);
2737 if (i)
2738 chip->write_buf(mtd, oob, i);
2739
2740 return 0;
2741 }
2742
2743 /**
2744 * nand_write_page - write one page
2745 * @mtd: MTD device structure
2746 * @chip: NAND chip descriptor
2747 * @offset: address offset within the page
2748 * @data_len: length of actual data to be written
2749 * @buf: the data to write
2750 * @oob_required: must write chip->oob_poi to OOB
2751 * @page: page number to write
2752 * @raw: use _raw version of write_page
2753 */
2754 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2755 uint32_t offset, int data_len, const uint8_t *buf,
2756 int oob_required, int page, int raw)
2757 {
2758 int status, subpage;
2759
2760 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2761 chip->ecc.write_subpage)
2762 subpage = offset || (data_len < mtd->writesize);
2763 else
2764 subpage = 0;
2765
2766 if (nand_standard_page_accessors(&chip->ecc))
2767 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2768
2769 if (unlikely(raw))
2770 status = chip->ecc.write_page_raw(mtd, chip, buf,
2771 oob_required, page);
2772 else if (subpage)
2773 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2774 buf, oob_required, page);
2775 else
2776 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2777 page);
2778
2779 if (status < 0)
2780 return status;
2781
2782 if (nand_standard_page_accessors(&chip->ecc)) {
2783 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2784
2785 status = chip->waitfunc(mtd, chip);
2786 if (status & NAND_STATUS_FAIL)
2787 return -EIO;
2788 }
2789
2790 return 0;
2791 }
2792
2793 /**
2794 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2795 * @mtd: MTD device structure
2796 * @oob: oob data buffer
2797 * @len: oob data write length
2798 * @ops: oob ops structure
2799 */
2800 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2801 struct mtd_oob_ops *ops)
2802 {
2803 struct nand_chip *chip = mtd_to_nand(mtd);
2804 int ret;
2805
2806 /*
2807 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2808 * data from a previous OOB read.
2809 */
2810 memset(chip->oob_poi, 0xff, mtd->oobsize);
2811
2812 switch (ops->mode) {
2813
2814 case MTD_OPS_PLACE_OOB:
2815 case MTD_OPS_RAW:
2816 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2817 return oob + len;
2818
2819 case MTD_OPS_AUTO_OOB:
2820 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2821 ops->ooboffs, len);
2822 BUG_ON(ret);
2823 return oob + len;
2824
2825 default:
2826 BUG();
2827 }
2828 return NULL;
2829 }
2830
2831 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
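
/*
 * NOTALIGNED() flags offsets/lengths that are not a multiple of the
 * (sub)page size. Hypothetical example: with 512-byte subpages, a write
 * of 1024 bytes at offset 512 passes, while one at offset 100 is
 * rejected by nand_do_write_ops() below with -EINVAL.
 */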
2832
2833 /**
2834 * nand_do_write_ops - [INTERN] NAND write with ECC
2835 * @mtd: MTD device structure
2836 * @to: offset to write to
2837 * @ops: oob operations description structure
2838 *
2839 * NAND write with ECC.
2840 */
2841 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2842 struct mtd_oob_ops *ops)
2843 {
2844 int chipnr, realpage, page, blockmask, column;
2845 struct nand_chip *chip = mtd_to_nand(mtd);
2846 uint32_t writelen = ops->len;
2847
2848 uint32_t oobwritelen = ops->ooblen;
2849 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2850
2851 uint8_t *oob = ops->oobbuf;
2852 uint8_t *buf = ops->datbuf;
2853 int ret;
2854 int oob_required = oob ? 1 : 0;
2855
2856 ops->retlen = 0;
2857 if (!writelen)
2858 return 0;
2859
2860 /* Reject writes which are not page aligned */
2861 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2862 pr_notice("%s: attempt to write non page aligned data\n",
2863 __func__);
2864 return -EINVAL;
2865 }
2866
2867 column = to & (mtd->writesize - 1);
2868
2869 chipnr = (int)(to >> chip->chip_shift);
2870 chip->select_chip(mtd, chipnr);
2871
2872 /* Check, if it is write protected */
2873 if (nand_check_wp(mtd)) {
2874 ret = -EIO;
2875 goto err_out;
2876 }
2877
2878 realpage = (int)(to >> chip->page_shift);
2879 page = realpage & chip->pagemask;
2880 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2881
2882 /* Invalidate the page cache, when we write to the cached page */
2883 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2884 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2885 chip->pagebuf = -1;
2886
2887 /* Don't allow multipage oob writes with offset */
2888 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2889 ret = -EINVAL;
2890 goto err_out;
2891 }
2892
2893 while (1) {
2894 int bytes = mtd->writesize;
2895 uint8_t *wbuf = buf;
2896 int use_bufpoi;
2897 int part_pagewr = (column || writelen < mtd->writesize);
2898
2899 if (part_pagewr)
2900 use_bufpoi = 1;
2901 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2902 use_bufpoi = !virt_addr_valid(buf) ||
2903 !IS_ALIGNED((unsigned long)buf,
2904 chip->buf_align);
2905 else
2906 use_bufpoi = 0;
2907
2908 /* Partial page write, or need to use bounce buffer? */
2909 if (use_bufpoi) {
2910 pr_debug("%s: using write bounce buffer for buf@%p\n",
2911 __func__, buf);
2912 if (part_pagewr)
2913 bytes = min_t(int, bytes - column, writelen);
2914 chip->pagebuf = -1;
2915 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2916 memcpy(&chip->buffers->databuf[column], buf, bytes);
2917 wbuf = chip->buffers->databuf;
2918 }
2919
2920 if (unlikely(oob)) {
2921 size_t len = min(oobwritelen, oobmaxlen);
2922 oob = nand_fill_oob(mtd, oob, len, ops);
2923 oobwritelen -= len;
2924 } else {
2925 /* We still need to erase leftover OOB data */
2926 memset(chip->oob_poi, 0xff, mtd->oobsize);
2927 }
2928
2929 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2930 oob_required, page,
2931 (ops->mode == MTD_OPS_RAW));
2932 if (ret)
2933 break;
2934
2935 writelen -= bytes;
2936 if (!writelen)
2937 break;
2938
2939 column = 0;
2940 buf += bytes;
2941 realpage++;
2942
2943 page = realpage & chip->pagemask;
2944 /* Check, if we cross a chip boundary */
2945 if (!page) {
2946 chipnr++;
2947 chip->select_chip(mtd, -1);
2948 chip->select_chip(mtd, chipnr);
2949 }
2950 }
2951
2952 ops->retlen = ops->len - writelen;
2953 if (unlikely(oob))
2954 ops->oobretlen = ops->ooblen;
2955
2956 err_out:
2957 chip->select_chip(mtd, -1);
2958 return ret;
2959 }
2960
2961 /**
2962 * panic_nand_write - [MTD Interface] NAND write with ECC
2963 * @mtd: MTD device structure
2964 * @to: offset to write to
2965 * @len: number of bytes to write
2966 * @retlen: pointer to variable to store the number of written bytes
2967 * @buf: the data to write
2968 *
2969 * NAND write with ECC. Used when performing writes in interrupt context, this
2970 * may for example be called by mtdoops when writing an oops while in panic.
2971 */
2972 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2973 size_t *retlen, const uint8_t *buf)
2974 {
2975 struct nand_chip *chip = mtd_to_nand(mtd);
2976 struct mtd_oob_ops ops;
2977 int ret;
2978
2979 /* Wait for the device to get ready */
2980 panic_nand_wait(mtd, chip, 400);
2981
2982 /* Grab the device */
2983 panic_nand_get_device(chip, mtd, FL_WRITING);
2984
2985 memset(&ops, 0, sizeof(ops));
2986 ops.len = len;
2987 ops.datbuf = (uint8_t *)buf;
2988 ops.mode = MTD_OPS_PLACE_OOB;
2989
2990 ret = nand_do_write_ops(mtd, to, &ops);
2991
2992 *retlen = ops.retlen;
2993 return ret;
2994 }
2995
2996 /**
2997 * nand_write - [MTD Interface] NAND write with ECC
2998 * @mtd: MTD device structure
2999 * @to: offset to write to
3000 * @len: number of bytes to write
3001 * @retlen: pointer to variable to store the number of written bytes
3002 * @buf: the data to write
3003 *
3004 * NAND write with ECC.
3005 */
3006 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3007 size_t *retlen, const uint8_t *buf)
3008 {
3009 struct mtd_oob_ops ops;
3010 int ret;
3011
3012 nand_get_device(mtd, FL_WRITING);
3013 memset(&ops, 0, sizeof(ops));
3014 ops.len = len;
3015 ops.datbuf = (uint8_t *)buf;
3016 ops.mode = MTD_OPS_PLACE_OOB;
3017 ret = nand_do_write_ops(mtd, to, &ops);
3018 *retlen = ops.retlen;
3019 nand_release_device(mtd);
3020 return ret;
3021 }
3022
3023 /**
3024 * nand_do_write_oob - [INTERN] NAND write out-of-band
3025 * @mtd: MTD device structure
3026 * @to: offset to write to
3027 * @ops: oob operation description structure
3028 *
3029 * NAND write out-of-band.
3030 */
3031 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3032 struct mtd_oob_ops *ops)
3033 {
3034 int chipnr, page, status, len;
3035 struct nand_chip *chip = mtd_to_nand(mtd);
3036
3037 pr_debug("%s: to = 0x%08x, len = %i\n",
3038 __func__, (unsigned int)to, (int)ops->ooblen);
3039
3040 len = mtd_oobavail(mtd, ops);
3041
3042 /* Do not allow write past end of page */
3043 if ((ops->ooboffs + ops->ooblen) > len) {
3044 pr_debug("%s: attempt to write past end of page\n",
3045 __func__);
3046 return -EINVAL;
3047 }
3048
3049 if (unlikely(ops->ooboffs >= len)) {
3050 pr_debug("%s: attempt to start write outside oob\n",
3051 __func__);
3052 return -EINVAL;
3053 }
3054
3055 /* Do not allow write past end of device */
3056 if (unlikely(to >= mtd->size ||
3057 ops->ooboffs + ops->ooblen >
3058 ((mtd->size >> chip->page_shift) -
3059 (to >> chip->page_shift)) * len)) {
3060 pr_debug("%s: attempt to write beyond end of device\n",
3061 __func__);
3062 return -EINVAL;
3063 }
3064
3065 chipnr = (int)(to >> chip->chip_shift);
3066
3067 /*
3068 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
3069 * of my DiskOnChip 2000 test units) will clear the whole data page too
3070 * if we don't do this. I have no clue why, but I seem to have 'fixed'
3071 * it in the doc2000 driver in August 1999. dwmw2.
3072 */
3073 nand_reset(chip, chipnr);
3074
3075 chip->select_chip(mtd, chipnr);
3076
3077 /* Shift to get page */
3078 page = (int)(to >> chip->page_shift);
3079
3080 /* Check, if it is write protected */
3081 if (nand_check_wp(mtd)) {
3082 chip->select_chip(mtd, -1);
3083 return -EROFS;
3084 }
3085
3086 /* Invalidate the page cache, if we write to the cached page */
3087 if (page == chip->pagebuf)
3088 chip->pagebuf = -1;
3089
3090 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3091
3092 if (ops->mode == MTD_OPS_RAW)
3093 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3094 else
3095 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3096
3097 chip->select_chip(mtd, -1);
3098
3099 if (status)
3100 return status;
3101
3102 ops->oobretlen = ops->ooblen;
3103
3104 return 0;
3105 }
3106
3107 /**
3108 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3109 * @mtd: MTD device structure
3110 * @to: offset to write to
3111 * @ops: oob operation description structure
3112 */
3113 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3114 struct mtd_oob_ops *ops)
3115 {
3116 int ret = -ENOTSUPP;
3117
3118 ops->retlen = 0;
3119
3120 /* Do not allow writes past end of device */
3121 if (ops->datbuf && (to + ops->len) > mtd->size) {
3122 pr_debug("%s: attempt to write beyond end of device\n",
3123 __func__);
3124 return -EINVAL;
3125 }
3126
3127 nand_get_device(mtd, FL_WRITING);
3128
3129 switch (ops->mode) {
3130 case MTD_OPS_PLACE_OOB:
3131 case MTD_OPS_AUTO_OOB:
3132 case MTD_OPS_RAW:
3133 break;
3134
3135 default:
3136 goto out;
3137 }
3138
3139 if (!ops->datbuf)
3140 ret = nand_do_write_oob(mtd, to, ops);
3141 else
3142 ret = nand_do_write_ops(mtd, to, ops);
3143
3144 out:
3145 nand_release_device(mtd);
3146 return ret;
3147 }
3148
3149 /**
3150 * single_erase - [GENERIC] NAND standard block erase command function
3151 * @mtd: MTD device structure
3152 * @page: the page address of the block which will be erased
3153 *
3154 * Standard erase command for NAND chips. Returns NAND status.
3155 */
3156 static int single_erase(struct mtd_info *mtd, int page)
3157 {
3158 struct nand_chip *chip = mtd_to_nand(mtd);
3159 /* Send commands to erase a block */
3160 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3161 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3162
3163 return chip->waitfunc(mtd, chip);
3164 }
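
/*
 * For reference: ERASE1 carries the row (page) address of the block,
 * ERASE2 takes no address and starts the erase; the status returned by
 * chip->waitfunc() has NAND_STATUS_FAIL set if the erase failed, which
 * nand_erase_nand() below checks.
 */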
3165
3166 /**
3167 * nand_erase - [MTD Interface] erase block(s)
3168 * @mtd: MTD device structure
3169 * @instr: erase instruction
3170 *
3171 * Erase one or more blocks.
3172 */
3173 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3174 {
3175 return nand_erase_nand(mtd, instr, 0);
3176 }
3177
3178 /**
3179 * nand_erase_nand - [INTERN] erase block(s)
3180 * @mtd: MTD device structure
3181 * @instr: erase instruction
3182 * @allowbbt: allow erasing the bbt area
3183 *
3184 * Erase one or more blocks.
3185 */
3186 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3187 int allowbbt)
3188 {
3189 int page, status, pages_per_block, ret, chipnr;
3190 struct nand_chip *chip = mtd_to_nand(mtd);
3191 loff_t len;
3192
3193 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3194 __func__, (unsigned long long)instr->addr,
3195 (unsigned long long)instr->len);
3196
3197 if (check_offs_len(mtd, instr->addr, instr->len))
3198 return -EINVAL;
3199
3200 /* Grab the lock and see if the device is available */
3201 nand_get_device(mtd, FL_ERASING);
3202
3203 /* Shift to get first page */
3204 page = (int)(instr->addr >> chip->page_shift);
3205 chipnr = (int)(instr->addr >> chip->chip_shift);
3206
3207 /* Calculate pages in each block */
3208 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3209
3210 /* Select the NAND device */
3211 chip->select_chip(mtd, chipnr);
3212
3213 /* Check, if it is write protected */
3214 if (nand_check_wp(mtd)) {
3215 pr_debug("%s: device is write protected!\n",
3216 __func__);
3217 instr->state = MTD_ERASE_FAILED;
3218 goto erase_exit;
3219 }
3220
3221 /* Loop through the pages */
3222 len = instr->len;
3223
3224 instr->state = MTD_ERASING;
3225
3226 while (len) {
3227 /* Check if we have a bad block, we do not erase bad blocks! */
3228 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3229 chip->page_shift, allowbbt)) {
3230 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3231 __func__, page);
3232 instr->state = MTD_ERASE_FAILED;
3233 goto erase_exit;
3234 }
3235
3236 /*
3237 * Invalidate the page cache, if we erase the block which
3238 * contains the current cached page.
3239 */
3240 if (page <= chip->pagebuf && chip->pagebuf <
3241 (page + pages_per_block))
3242 chip->pagebuf = -1;
3243
3244 status = chip->erase(mtd, page & chip->pagemask);
3245
3246 /* See if block erase succeeded */
3247 if (status & NAND_STATUS_FAIL) {
3248 pr_debug("%s: failed erase, page 0x%08x\n",
3249 __func__, page);
3250 instr->state = MTD_ERASE_FAILED;
3251 instr->fail_addr =
3252 ((loff_t)page << chip->page_shift);
3253 goto erase_exit;
3254 }
3255
3256 /* Increment page address and decrement length */
3257 len -= (1ULL << chip->phys_erase_shift);
3258 page += pages_per_block;
3259
3260 /* Check, if we cross a chip boundary */
3261 if (len && !(page & chip->pagemask)) {
3262 chipnr++;
3263 chip->select_chip(mtd, -1);
3264 chip->select_chip(mtd, chipnr);
3265 }
3266 }
3267 instr->state = MTD_ERASE_DONE;
3268
3269 erase_exit:
3270
3271 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3272
3273 /* Deselect and wake up anyone waiting on the device */
3274 chip->select_chip(mtd, -1);
3275 nand_release_device(mtd);
3276
3277 /* Do call back function */
3278 if (!ret)
3279 mtd_erase_callback(instr);
3280
3281 /* Return more or less happy */
3282 return ret;
3283 }
3284
3285 /**
3286 * nand_sync - [MTD Interface] sync
3287 * @mtd: MTD device structure
3288 *
3289 * Sync is actually a wait for chip ready function.
3290 */
3291 static void nand_sync(struct mtd_info *mtd)
3292 {
3293 pr_debug("%s: called\n", __func__);
3294
3295 /* Grab the lock and see if the device is available */
3296 nand_get_device(mtd, FL_SYNCING);
3297 /* Release it and go back */
3298 nand_release_device(mtd);
3299 }
3300
3301 /**
3302 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3303 * @mtd: MTD device structure
3304 * @offs: offset relative to mtd start
3305 */
3306 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3307 {
3308 struct nand_chip *chip = mtd_to_nand(mtd);
3309 int chipnr = (int)(offs >> chip->chip_shift);
3310 int ret;
3311
3312 /* Select the NAND device */
3313 nand_get_device(mtd, FL_READING);
3314 chip->select_chip(mtd, chipnr);
3315
3316 ret = nand_block_checkbad(mtd, offs, 0);
3317
3318 chip->select_chip(mtd, -1);
3319 nand_release_device(mtd);
3320
3321 return ret;
3322 }
3323
3324 /**
3325 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3326 * @mtd: MTD device structure
3327 * @ofs: offset relative to mtd start
3328 */
3329 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3330 {
3331 int ret;
3332
3333 ret = nand_block_isbad(mtd, ofs);
3334 if (ret) {
3335 /* If it was bad already, return success and do nothing */
3336 if (ret > 0)
3337 return 0;
3338 return ret;
3339 }
3340
3341 return nand_block_markbad_lowlevel(mtd, ofs);
3342 }
3343
3344 /**
3345 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3346 * @mtd: MTD device structure
3347 * @ofs: offset relative to mtd start
3348 * @len: length of mtd
3349 */
3350 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3351 {
3352 struct nand_chip *chip = mtd_to_nand(mtd);
3353 u32 part_start_block;
3354 u32 part_end_block;
3355 u32 part_start_die;
3356 u32 part_end_die;
3357
3358 /*
3359 * max_bb_per_die and blocks_per_die are used to determine
3360 * the maximum bad block count.
3361 */
3362 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3363 return -ENOTSUPP;
3364
3365 /* Get the start and end of the partition in erase blocks. */
3366 part_start_block = mtd_div_by_eb(ofs, mtd);
3367 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3368
3369 /* Get the start and end LUNs of the partition. */
3370 part_start_die = part_start_block / chip->blocks_per_die;
3371 part_end_die = part_end_block / chip->blocks_per_die;
3372
3373 /*
3374 * Look up the bad blocks per unit and multiply by the number of units
3375 * that the partition spans.
3376 */
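/*
 * Worked example (hypothetical numbers): a partition covering erase
 * blocks 1000..3047 on a device with blocks_per_die = 2048 and
 * max_bb_per_die = 40 spans dies 0 and 1, so the worst case is
 * 40 * (1 - 0 + 1) = 80 bad blocks.
 */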
3377 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3378 }
3379
3380 /**
3381 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3382 * @mtd: MTD device structure
3383 * @chip: nand chip info structure
3384 * @addr: feature address.
3385 * @subfeature_param: the subfeature parameters, a four bytes array.
3386 */
3387 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3388 int addr, uint8_t *subfeature_param)
3389 {
3390 int status;
3391 int i;
3392
3393 if (!chip->onfi_version ||
3394 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3395 & ONFI_OPT_CMD_SET_GET_FEATURES))
3396 return -EINVAL;
3397
3398 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3399 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3400 chip->write_byte(mtd, subfeature_param[i]);
3401
3402 status = chip->waitfunc(mtd, chip);
3403 if (status & NAND_STATUS_FAIL)
3404 return -EIO;
3405 return 0;
3406 }
3407
3408 /**
3409 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3410 * @mtd: MTD device structure
3411 * @chip: nand chip info structure
3412 * @addr: feature address.
3413 * @subfeature_param: the subfeature parameters, a four bytes array.
3414 */
3415 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3416 int addr, uint8_t *subfeature_param)
3417 {
3418 int i;
3419
3420 if (!chip->onfi_version ||
3421 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3422 & ONFI_OPT_CMD_SET_GET_FEATURES))
3423 return -EINVAL;
3424
3425 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3426 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3427 *subfeature_param++ = chip->read_byte(mtd);
3428 return 0;
3429 }
3430
3431 /**
3432 * nand_onfi_get_set_features_notsupp - set/get features stub returning
3433 * -ENOTSUPP
3434 * @mtd: MTD device structure
3435 * @chip: nand chip info structure
3436 * @addr: feature address.
3437 * @subfeature_param: the subfeature parameters, a four bytes array.
3438 *
3439 * Should be used by NAND controller drivers that do not support the SET/GET
3440 * FEATURES operations.
3441 */
3442 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
3443 struct nand_chip *chip, int addr,
3444 u8 *subfeature_param)
3445 {
3446 return -ENOTSUPP;
3447 }
3448 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
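
/*
 * Typical use (sketch, in a controller driver's init path) when the
 * controller cannot pass SET/GET FEATURES through to the chip:
 *
 *	chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
 *	chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
 */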
3449
3450 /**
3451 * nand_suspend - [MTD Interface] Suspend the NAND flash
3452 * @mtd: MTD device structure
3453 */
3454 static int nand_suspend(struct mtd_info *mtd)
3455 {
3456 return nand_get_device(mtd, FL_PM_SUSPENDED);
3457 }
3458
3459 /**
3460 * nand_resume - [MTD Interface] Resume the NAND flash
3461 * @mtd: MTD device structure
3462 */
3463 static void nand_resume(struct mtd_info *mtd)
3464 {
3465 struct nand_chip *chip = mtd_to_nand(mtd);
3466
3467 if (chip->state == FL_PM_SUSPENDED)
3468 nand_release_device(mtd);
3469 else
3470 pr_err("%s called for a chip which is not in suspended state\n",
3471 __func__);
3472 }
3473
3474 /**
3475 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3476 * prevent further operations
3477 * @mtd: MTD device structure
3478 */
3479 static void nand_shutdown(struct mtd_info *mtd)
3480 {
3481 nand_get_device(mtd, FL_PM_SUSPENDED);
3482 }
3483
3484 /* Set default functions */
3485 static void nand_set_defaults(struct nand_chip *chip)
3486 {
3487 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3488
3489 /* check for proper chip_delay setup, set 20us if not */
3490 if (!chip->chip_delay)
3491 chip->chip_delay = 20;
3492
3493 /* check if a user-supplied command function was given */
3494 if (chip->cmdfunc == NULL)
3495 chip->cmdfunc = nand_command;
3496
3497 /* check if a user-supplied wait function was given */
3498 if (chip->waitfunc == NULL)
3499 chip->waitfunc = nand_wait;
3500
3501 if (!chip->select_chip)
3502 chip->select_chip = nand_select_chip;
3503
3504 /* set for ONFI nand */
3505 if (!chip->onfi_set_features)
3506 chip->onfi_set_features = nand_onfi_set_features;
3507 if (!chip->onfi_get_features)
3508 chip->onfi_get_features = nand_onfi_get_features;
3509
3510 /* If called twice, pointers that depend on busw may need to be reset */
3511 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3512 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3513 if (!chip->read_word)
3514 chip->read_word = nand_read_word;
3515 if (!chip->block_bad)
3516 chip->block_bad = nand_block_bad;
3517 if (!chip->block_markbad)
3518 chip->block_markbad = nand_default_block_markbad;
3519 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3520 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3521 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3522 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3523 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3524 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3525 if (!chip->scan_bbt)
3526 chip->scan_bbt = nand_default_bbt;
3527
3528 if (!chip->controller) {
3529 chip->controller = &chip->hwcontrol;
3530 nand_hw_control_init(chip->controller);
3531 }
3532
3533 if (!chip->buf_align)
3534 chip->buf_align = 1;
3535 }
3536
3537 /* Sanitize ONFI strings so we can safely print them */
3538 static void sanitize_string(uint8_t *s, size_t len)
3539 {
3540 ssize_t i;
3541
3542 /* Null terminate */
3543 s[len - 1] = 0;
3544
3545 /* Remove non printable chars */
3546 for (i = 0; i < len - 1; i++) {
3547 if (s[i] < ' ' || s[i] > 127)
3548 s[i] = '?';
3549 }
3550
3551 /* Remove trailing spaces */
3552 strim(s);
3553 }
3554
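/*
 * Bit-wise CRC16 as used by the ONFI/JEDEC parameter pages: polynomial
 * 0x8005 (x^16 + x^15 + x^2 + 1), seeded with ONFI_CRC_BASE and computed
 * over the parameter page contents excluding the CRC field itself.
 */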
3555 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3556 {
3557 int i;
3558 while (len--) {
3559 crc ^= *p++ << 8;
3560 for (i = 0; i < 8; i++)
3561 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3562 }
3563
3564 return crc;
3565 }
3566
3567 /* Parse the Extended Parameter Page. */
3568 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3569 struct nand_onfi_params *p)
3570 {
3571 struct mtd_info *mtd = nand_to_mtd(chip);
3572 struct onfi_ext_param_page *ep;
3573 struct onfi_ext_section *s;
3574 struct onfi_ext_ecc_info *ecc;
3575 uint8_t *cursor;
3576 int ret = -EINVAL;
3577 int len;
3578 int i;
3579
3580 len = le16_to_cpu(p->ext_param_page_length) * 16;
3581 ep = kmalloc(len, GFP_KERNEL);
3582 if (!ep)
3583 return -ENOMEM;
3584
3585 /* Send our own NAND_CMD_PARAM. */
3586 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3587
3588 /* Use the Change Read Column command to skip the ONFI param pages. */
3589 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3590 sizeof(*p) * p->num_of_param_pages, -1);
3591
3592 /* Read out the Extended Parameter Page. */
3593 chip->read_buf(mtd, (uint8_t *)ep, len);
3594 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3595 != le16_to_cpu(ep->crc))) {
3596 pr_debug("fail in the CRC.\n");
3597 goto ext_out;
3598 }
3599
3600 /*
3601 * Check the signature. We do not strictly follow the ONFI spec here;
3602 * this may change in the future.
3603 */
3604 if (strncmp(ep->sig, "EPPS", 4)) {
3605 pr_debug("The signature is invalid.\n");
3606 goto ext_out;
3607 }
3608
3609 /* find the ECC section. */
3610 cursor = (uint8_t *)(ep + 1);
3611 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3612 s = ep->sections + i;
3613 if (s->type == ONFI_SECTION_TYPE_2)
3614 break;
3615 cursor += s->length * 16;
3616 }
3617 if (i == ONFI_EXT_SECTION_MAX) {
3618 pr_debug("We can not find the ECC section.\n");
3619 goto ext_out;
3620 }
3621
3622 /* get the info we want. */
3623 ecc = (struct onfi_ext_ecc_info *)cursor;
3624
3625 if (!ecc->codeword_size) {
3626 pr_debug("Invalid codeword size\n");
3627 goto ext_out;
3628 }
3629
3630 chip->ecc_strength_ds = ecc->ecc_bits;
3631 chip->ecc_step_ds = 1 << ecc->codeword_size;
3632 ret = 0;
3633
3634 ext_out:
3635 kfree(ep);
3636 return ret;
3637 }
3638
3639 /*
3640 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3641 */
3642 static int nand_flash_detect_onfi(struct nand_chip *chip)
3643 {
3644 struct mtd_info *mtd = nand_to_mtd(chip);
3645 struct nand_onfi_params *p = &chip->onfi_params;
3646 int i, j;
3647 int val;
3648
3649 /* Try ONFI for unknown chip or LP */
3650 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3651 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3652 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3653 return 0;
3654
3655 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3656 for (i = 0; i < 3; i++) {
3657 for (j = 0; j < sizeof(*p); j++)
3658 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3659 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3660 le16_to_cpu(p->crc)) {
3661 break;
3662 }
3663 }
3664
3665 if (i == 3) {
3666 pr_err("Could not find valid ONFI parameter page; aborting\n");
3667 return 0;
3668 }
3669
3670 /* Check version */
3671 val = le16_to_cpu(p->revision);
3672 if (val & (1 << 5))
3673 chip->onfi_version = 23;
3674 else if (val & (1 << 4))
3675 chip->onfi_version = 22;
3676 else if (val & (1 << 3))
3677 chip->onfi_version = 21;
3678 else if (val & (1 << 2))
3679 chip->onfi_version = 20;
3680 else if (val & (1 << 1))
3681 chip->onfi_version = 10;
3682
3683 if (!chip->onfi_version) {
3684 pr_info("unsupported ONFI version: %d\n", val);
3685 return 0;
3686 }
3687
3688 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3689 sanitize_string(p->model, sizeof(p->model));
3690 if (!mtd->name)
3691 mtd->name = p->model;
3692
3693 mtd->writesize = le32_to_cpu(p->byte_per_page);
3694
3695 /*
3696 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3697 * (don't ask me who thought of this...). MTD assumes that these
3698 * dimensions will be power-of-2, so just truncate the remaining area.
3699 */
3700 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3701 mtd->erasesize *= mtd->writesize;
3702
3703 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3704
3705 /* See erasesize comment */
3706 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3707 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3708 chip->bits_per_cell = p->bits_per_cell;
3709
3710 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3711 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3712
3713 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3714 chip->options |= NAND_BUSWIDTH_16;
3715
3716 if (p->ecc_bits != 0xff) {
3717 chip->ecc_strength_ds = p->ecc_bits;
3718 chip->ecc_step_ds = 512;
3719 } else if (chip->onfi_version >= 21 &&
3720 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3721
3722 /*
3723 * nand_flash_detect_ext_param_page() uses the Change Read Column
3724 * command, which may not be supported by the current chip->cmdfunc,
3725 * so try to update chip->cmdfunc now. We do not replace a
3726 * user-supplied command function.
3727 */
3728 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3729 chip->cmdfunc = nand_command_lp;
3730
3731 /* The Extended Parameter Page is supported since ONFI 2.1. */
3732 if (nand_flash_detect_ext_param_page(chip, p))
3733 pr_warn("Failed to detect ONFI extended param page\n");
3734 } else {
3735 pr_warn("Could not retrieve ONFI ECC requirements\n");
3736 }
3737
3738 return 1;
3739 }
3740
3741 /*
3742 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3743 */
3744 static int nand_flash_detect_jedec(struct nand_chip *chip)
3745 {
3746 struct mtd_info *mtd = nand_to_mtd(chip);
3747 struct nand_jedec_params *p = &chip->jedec_params;
3748 struct jedec_ecc_info *ecc;
3749 int val;
3750 int i, j;
3751
3752 /* Try JEDEC for unknown chip or LP */
3753 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3754 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3755 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3756 chip->read_byte(mtd) != 'C')
3757 return 0;
3758
3759 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3760 for (i = 0; i < 3; i++) {
3761 for (j = 0; j < sizeof(*p); j++)
3762 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3763
3764 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3765 le16_to_cpu(p->crc))
3766 break;
3767 }
3768
3769 if (i == 3) {
3770 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3771 return 0;
3772 }
3773
3774 /* Check version */
3775 val = le16_to_cpu(p->revision);
3776 if (val & (1 << 2))
3777 chip->jedec_version = 10;
3778 else if (val & (1 << 1))
3779 chip->jedec_version = 1; /* vendor specific version */
3780
3781 if (!chip->jedec_version) {
3782 pr_info("unsupported JEDEC version: %d\n", val);
3783 return 0;
3784 }
3785
3786 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3787 sanitize_string(p->model, sizeof(p->model));
3788 if (!mtd->name)
3789 mtd->name = p->model;
3790
3791 mtd->writesize = le32_to_cpu(p->byte_per_page);
3792
3793 /* Please refer to the comment in nand_flash_detect_onfi(). */
3794 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3795 mtd->erasesize *= mtd->writesize;
3796
3797 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3798
3799 /* Please refer to the comment in nand_flash_detect_onfi(). */
3800 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3801 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3802 chip->bits_per_cell = p->bits_per_cell;
3803
3804 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3805 chip->options |= NAND_BUSWIDTH_16;
3806
3807 /* ECC info */
3808 ecc = &p->ecc_info[0];
3809
3810 if (ecc->codeword_size >= 9) {
3811 chip->ecc_strength_ds = ecc->ecc_bits;
3812 chip->ecc_step_ds = 1 << ecc->codeword_size;
3813 } else {
3814 pr_warn("Invalid codeword size\n");
3815 }
3816
3817 return 1;
3818 }
3819
3820 /*
3821 * nand_id_has_period - Check if an ID string has a given wraparound period
3822 * @id_data: the ID string
3823 * @arrlen: the length of the @id_data array
3824 * @period: the period of repetition
3825 *
3826 * Check if an ID string is repeated within a given sequence of bytes at
3827 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
3828 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3829 * if the repetition has a period of @period; otherwise, returns zero.
3830 */
3831 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3832 {
3833 int i, j;
3834 for (i = 0; i < period; i++)
3835 for (j = i + period; j < arrlen; j += period)
3836 if (id_data[i] != id_data[j])
3837 return 0;
3838 return 1;
3839 }
3840
3841 /*
3842 * nand_id_len - Get the length of an ID string returned by CMD_READID
3843 * @id_data: the ID string
3844 * @arrlen: the length of the @id_data array
3845 *
3846 * Returns the length of the ID string, according to known wraparound/trailing
3847 * zero patterns. If no pattern exists, returns the length of the array.
3848 */
3849 static int nand_id_len(u8 *id_data, int arrlen)
3850 {
3851 int last_nonzero, period;
3852
3853 /* Find last non-zero byte */
3854 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3855 if (id_data[last_nonzero])
3856 break;
3857
3858 /* All zeros */
3859 if (last_nonzero < 0)
3860 return 0;
3861
3862 /* Calculate wraparound period */
3863 for (period = 1; period < arrlen; period++)
3864 if (nand_id_has_period(id_data, arrlen, period))
3865 break;
3866
3867 /* There's a repeated pattern */
3868 if (period < arrlen)
3869 return period;
3870
3871 /* There are trailing zeros */
3872 if (last_nonzero < arrlen - 1)
3873 return last_nonzero + 1;
3874
3875 /* No pattern detected */
3876 return arrlen;
3877 }
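
/*
 * Worked example (illustrative, derived from the comment above): for
 * id_data = {0x20, 0x01, 0x7F, 0x20} and arrlen = 4, nand_id_has_period()
 * fails for period 1 (0x20 != 0x01) and period 2 (0x20 != 0x7F), but
 * succeeds for period 3 (only id_data[0] and id_data[3] are compared),
 * so nand_id_len() returns 3.
 */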
3878
3879 /* Extract the bits per cell from the 3rd byte of the extended ID */
3880 static int nand_get_bits_per_cell(u8 cellinfo)
3881 {
3882 int bits;
3883
3884 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3885 bits >>= NAND_CI_CELLTYPE_SHIFT;
3886 return bits + 1;
3887 }
3888
3889 /*
3890 * Many newer NAND chips share similar device ID codes, which represent the size of the
3891 * chip. The rest of the parameters must be decoded according to generic or
3892 * manufacturer-specific "extended ID" decoding patterns.
3893 */
3894 void nand_decode_ext_id(struct nand_chip *chip)
3895 {
3896 struct mtd_info *mtd = nand_to_mtd(chip);
3897 int extid;
3898 u8 *id_data = chip->id.data;
3899 /* The 3rd id byte holds MLC / multichip data */
3900 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3901 /* The 4th id byte is the important one */
3902 extid = id_data[3];
3903
3904 /* Calc pagesize */
3905 mtd->writesize = 1024 << (extid & 0x03);
3906 extid >>= 2;
3907 /* Calc oobsize */
3908 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3909 extid >>= 2;
3910 /* Calc blocksize. Blocksize is a multiple of 64KiB */
3911 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3912 extid >>= 2;
3913 /* Get buswidth information */
3914 if (extid & 0x1)
3915 chip->options |= NAND_BUSWIDTH_16;
3916 }
3917 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
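
/*
 * Worked example (illustrative numbers, not tied to a specific part): if
 * the 4th ID byte is 0x95, the decoding above yields
 *	writesize = 1024 << (0x95 & 0x03)       = 2048 bytes,
 *	oobsize   = (8 << 1) * (2048 >> 9)      = 64 bytes,
 *	erasesize = (64 * 1024) << 1            = 128 KiB,
 * and the remaining buswidth bit is 0, i.e. an 8-bit bus.
 */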
3918
3919 /*
3920 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3921 * decodes a matching ID table entry and assigns the MTD size parameters for
3922 * the chip.
3923 */
3924 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3925 {
3926 struct mtd_info *mtd = nand_to_mtd(chip);
3927
3928 mtd->erasesize = type->erasesize;
3929 mtd->writesize = type->pagesize;
3930 mtd->oobsize = mtd->writesize / 32;
3931
3932 /* All legacy ID NAND are small-page, SLC */
3933 chip->bits_per_cell = 1;
3934 }
3935
3936 /*
3937 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3938 * heuristic patterns using various detected parameters (e.g., manufacturer,
3939 * page size, cell-type information).
3940 */
3941 static void nand_decode_bbm_options(struct nand_chip *chip)
3942 {
3943 struct mtd_info *mtd = nand_to_mtd(chip);
3944
3945 /* Set the bad block position */
3946 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3947 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3948 else
3949 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3950 }
3951
3952 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3953 {
3954 return type->id_len;
3955 }
3956
3957 static bool find_full_id_nand(struct nand_chip *chip,
3958 struct nand_flash_dev *type)
3959 {
3960 struct mtd_info *mtd = nand_to_mtd(chip);
3961 u8 *id_data = chip->id.data;
3962
3963 if (!strncmp(type->id, id_data, type->id_len)) {
3964 mtd->writesize = type->pagesize;
3965 mtd->erasesize = type->erasesize;
3966 mtd->oobsize = type->oobsize;
3967
3968 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3969 chip->chipsize = (uint64_t)type->chipsize << 20;
3970 chip->options |= type->options;
3971 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3972 chip->ecc_step_ds = NAND_ECC_STEP(type);
3973 chip->onfi_timing_mode_default =
3974 type->onfi_timing_mode_default;
3975
3976 if (!mtd->name)
3977 mtd->name = type->name;
3978
3979 return true;
3980 }
3981 return false;
3982 }
3983
3984 /*
3985 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3986 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3987 * table.
3988 */
3989 static void nand_manufacturer_detect(struct nand_chip *chip)
3990 {
3991 /*
3992 * Try manufacturer detection if available and use
3993 * nand_decode_ext_id() otherwise.
3994 */
3995 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3996 chip->manufacturer.desc->ops->detect) {
3997 /* The 3rd id byte holds MLC / multichip data */
3998 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
3999 chip->manufacturer.desc->ops->detect(chip);
4000 } else {
4001 nand_decode_ext_id(chip);
4002 }
4003 }
4004
4005 /*
4006 * Manufacturer initialization. This function is called for all NANDs including
4007 * ONFI and JEDEC compliant ones.
4008 * Manufacturer drivers should put all their specific initialization code in
4009 * their ->init() hook.
4010 */
4011 static int nand_manufacturer_init(struct nand_chip *chip)
4012 {
4013 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4014 !chip->manufacturer.desc->ops->init)
4015 return 0;
4016
4017 return chip->manufacturer.desc->ops->init(chip);
4018 }
4019
4020 /*
4021 * Manufacturer cleanup. This function is called for all NANDs including
4022 * ONFI and JEDEC compliant ones.
4023 * Manufacturer drivers should put all their specific cleanup code in their
4024 * ->cleanup() hook.
4025 */
4026 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4027 {
4028 /* Release manufacturer private data */
4029 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4030 chip->manufacturer.desc->ops->cleanup)
4031 chip->manufacturer.desc->ops->cleanup(chip);
4032 }
4033
4034 /*
4035 * Get the flash and manufacturer id and lookup if the type is supported.
4036 */
4037 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4038 {
4039 const struct nand_manufacturer *manufacturer;
4040 struct mtd_info *mtd = nand_to_mtd(chip);
4041 int busw;
4042 int i, ret;
4043 u8 *id_data = chip->id.data;
4044 u8 maf_id, dev_id;
4045
4046 /*
4047 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4048 * after power-up.
4049 */
4050 nand_reset(chip, 0);
4051
4052 /* Select the device */
4053 chip->select_chip(mtd, 0);
4054
4055 /* Send the command for reading device ID */
4056 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4057
4058 /* Read manufacturer and device IDs */
4059 maf_id = chip->read_byte(mtd);
4060 dev_id = chip->read_byte(mtd);
4061
4062 /*
4063 * Try again to make sure, as on some systems bus-hold or other
4064 * interface concerns can cause random data that looks like a
4065 * possibly credible NAND flash to appear. If the two results do
4066 * not match, ignore the device completely.
4067 */
4068
4069 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4070
4071 /* Read entire ID string */
4072 for (i = 0; i < 8; i++)
4073 id_data[i] = chip->read_byte(mtd);
4074
4075 if (id_data[0] != maf_id || id_data[1] != dev_id) {
4076 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4077 maf_id, dev_id, id_data[0], id_data[1]);
4078 return -ENODEV;
4079 }
4080
4081 chip->id.len = nand_id_len(id_data, 8);
4082
4083 /* Try to identify manufacturer */
4084 manufacturer = nand_get_manufacturer(maf_id);
4085 chip->manufacturer.desc = manufacturer;
4086
4087 if (!type)
4088 type = nand_flash_ids;
4089
4090 /*
4091 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4092 * override it.
4093 * This is required to make sure initial NAND bus width set by the
4094 * NAND controller driver is coherent with the real NAND bus width
4095 * (extracted by auto-detection code).
4096 */
4097 busw = chip->options & NAND_BUSWIDTH_16;
4098
4099 /*
4100 * The flag is only set (never cleared), so reset it to its default value
4101 * before starting auto-detection.
4102 */
4103 chip->options &= ~NAND_BUSWIDTH_16;
4104
4105 for (; type->name != NULL; type++) {
4106 if (is_full_id_nand(type)) {
4107 if (find_full_id_nand(chip, type))
4108 goto ident_done;
4109 } else if (dev_id == type->dev_id) {
4110 break;
4111 }
4112 }
4113
4114 chip->onfi_version = 0;
4115 if (!type->name || !type->pagesize) {
4116 /* Check if the chip is ONFI compliant */
4117 if (nand_flash_detect_onfi(chip))
4118 goto ident_done;
4119
4120 /* Check if the chip is JEDEC compliant */
4121 if (nand_flash_detect_jedec(chip))
4122 goto ident_done;
4123 }
4124
4125 if (!type->name)
4126 return -ENODEV;
4127
4128 if (!mtd->name)
4129 mtd->name = type->name;
4130
4131 chip->chipsize = (uint64_t)type->chipsize << 20;
4132
4133 if (!type->pagesize)
4134 nand_manufacturer_detect(chip);
4135 else
4136 nand_decode_id(chip, type);
4137
4138 /* Get chip options */
4139 chip->options |= type->options;
4140
4141 ident_done:
4142
4143 if (chip->options & NAND_BUSWIDTH_AUTO) {
4144 WARN_ON(busw & NAND_BUSWIDTH_16);
4145 nand_set_defaults(chip);
4146 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4147 /*
4148 * Check if the bus width is correct. Hardware drivers should
4149 * set it up correctly!
4150 */
4151 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4152 maf_id, dev_id);
4153 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4154 mtd->name);
4155 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4156 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4157 return -EINVAL;
4158 }
4159
4160 nand_decode_bbm_options(chip);
4161
4162 /* Calculate the address shift from the page size */
4163 chip->page_shift = ffs(mtd->writesize) - 1;
4164 /* Convert chipsize to number of pages per chip -1 */
4165 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4166
4167 chip->bbt_erase_shift = chip->phys_erase_shift =
4168 ffs(mtd->erasesize) - 1;
4169 if (chip->chipsize & 0xffffffff)
4170 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4171 else {
4172 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4173 chip->chip_shift += 32 - 1;
4174 }
4175
4176 chip->badblockbits = 8;
4177 chip->erase = single_erase;
4178
4179 /* Do not replace user supplied command function! */
4180 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4181 chip->cmdfunc = nand_command_lp;
4182
4183 ret = nand_manufacturer_init(chip);
4184 if (ret)
4185 return ret;
4186
4187 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4188 maf_id, dev_id);
4189
4190 if (chip->onfi_version)
4191 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4192 chip->onfi_params.model);
4193 else if (chip->jedec_version)
4194 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4195 chip->jedec_params.model);
4196 else
4197 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4198 type->name);
4199
4200 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4201 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4202 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4203 return 0;
4204 }
4205
4206 static const char * const nand_ecc_modes[] = {
4207 [NAND_ECC_NONE] = "none",
4208 [NAND_ECC_SOFT] = "soft",
4209 [NAND_ECC_HW] = "hw",
4210 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4211 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4212 [NAND_ECC_ON_DIE] = "on-die",
4213 };
4214
4215 static int of_get_nand_ecc_mode(struct device_node *np)
4216 {
4217 const char *pm;
4218 int err, i;
4219
4220 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4221 if (err < 0)
4222 return err;
4223
4224 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4225 if (!strcasecmp(pm, nand_ecc_modes[i]))
4226 return i;
4227
4228 /*
4229 * For backward compatibility we support a few obsolete values that no
4230 * longer have mappings into nand_ecc_modes_t (they were merged with
4231 * other enums).
4232 */
4233 if (!strcasecmp(pm, "soft_bch"))
4234 return NAND_ECC_SOFT;
4235
4236 return -ENODEV;
4237 }
4238
4239 static const char * const nand_ecc_algos[] = {
4240 [NAND_ECC_HAMMING] = "hamming",
4241 [NAND_ECC_BCH] = "bch",
4242 };
4243
4244 static int of_get_nand_ecc_algo(struct device_node *np)
4245 {
4246 const char *pm;
4247 int err, i;
4248
4249 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4250 if (!err) {
4251 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4252 if (!strcasecmp(pm, nand_ecc_algos[i]))
4253 return i;
4254 return -ENODEV;
4255 }
4256
4257 /*
4258 * For backward compatibility we also read "nand-ecc-mode", checking
4259 * for some obsolete values that used to specify the ECC algorithm.
4260 */
4261 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4262 if (err < 0)
4263 return err;
4264
4265 if (!strcasecmp(pm, "soft"))
4266 return NAND_ECC_HAMMING;
4267 else if (!strcasecmp(pm, "soft_bch"))
4268 return NAND_ECC_BCH;
4269
4270 return -ENODEV;
4271 }
4272
4273 static int of_get_nand_ecc_step_size(struct device_node *np)
4274 {
4275 int ret;
4276 u32 val;
4277
4278 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4279 return ret ? ret : val;
4280 }
4281
4282 static int of_get_nand_ecc_strength(struct device_node *np)
4283 {
4284 int ret;
4285 u32 val;
4286
4287 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4288 return ret ? ret : val;
4289 }
4290
4291 static int of_get_nand_bus_width(struct device_node *np)
4292 {
4293 u32 val;
4294
4295 if (of_property_read_u32(np, "nand-bus-width", &val))
4296 return 8;
4297
4298 switch (val) {
4299 case 8:
4300 case 16:
4301 return val;
4302 default:
4303 return -EIO;
4304 }
4305 }
4306
4307 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4308 {
4309 return of_property_read_bool(np, "nand-on-flash-bbt");
4310 }
4311
4312 static int nand_dt_init(struct nand_chip *chip)
4313 {
4314 struct device_node *dn = nand_get_flash_node(chip);
4315 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4316
4317 if (!dn)
4318 return 0;
4319
4320 if (of_get_nand_bus_width(dn) == 16)
4321 chip->options |= NAND_BUSWIDTH_16;
4322
4323 if (of_get_nand_on_flash_bbt(dn))
4324 chip->bbt_options |= NAND_BBT_USE_FLASH;
4325
4326 ecc_mode = of_get_nand_ecc_mode(dn);
4327 ecc_algo = of_get_nand_ecc_algo(dn);
4328 ecc_strength = of_get_nand_ecc_strength(dn);
4329 ecc_step = of_get_nand_ecc_step_size(dn);
4330
4331 if (ecc_mode >= 0)
4332 chip->ecc.mode = ecc_mode;
4333
4334 if (ecc_algo >= 0)
4335 chip->ecc.algo = ecc_algo;
4336
4337 if (ecc_strength >= 0)
4338 chip->ecc.strength = ecc_strength;
4339
4340 if (ecc_step > 0)
4341 chip->ecc.size = ecc_step;
4342
4343 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4344 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4345
4346 return 0;
4347 }
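
/*
 * Illustrative device tree fragment (hypothetical node and values) showing
 * the properties parsed by nand_dt_init() above:
 *
 *	nand@0 {
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */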
4348
4349 /**
4350 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4351 * @mtd: MTD device structure
4352 * @maxchips: number of chips to scan for
4353 * @table: alternative NAND ID table
4354 *
4355 * This is the first phase of the normal nand_scan() function. It reads the
4356 * flash ID and sets up MTD fields accordingly.
4357 *
4358 */
4359 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4360 struct nand_flash_dev *table)
4361 {
4362 int i, nand_maf_id, nand_dev_id;
4363 struct nand_chip *chip = mtd_to_nand(mtd);
4364 int ret;
4365
4366 ret = nand_dt_init(chip);
4367 if (ret)
4368 return ret;
4369
4370 if (!mtd->name && mtd->dev.parent)
4371 mtd->name = dev_name(mtd->dev.parent);
4372
4373 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4374 /*
4375 * The default functions assigned for select_chip() and
4376 * cmdfunc() both expect cmd_ctrl() to be populated, so we
4377 * need to check that this is the case.
4378 */
4379 pr_err("chip.cmd_ctrl() callback is not provided\n");
4380 return -EINVAL;
4381 }
4382 /* Set the default functions */
4383 nand_set_defaults(chip);
4384
4385 /* Read the flash type */
4386 ret = nand_detect(chip, table);
4387 if (ret) {
4388 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4389 pr_warn("No NAND device found\n");
4390 chip->select_chip(mtd, -1);
4391 return ret;
4392 }
4393
4394 /* Initialize the ->data_interface field. */
4395 ret = nand_init_data_interface(chip);
4396 if (ret)
4397 goto err_nand_init;
4398
4399 /*
4400 * Setup the data interface correctly on the chip and controller side.
4401 * This explicit call to nand_setup_data_interface() is only required
4402 * for the first die, because nand_reset() has been called before
4403 * ->data_interface and ->default_onfi_timing_mode were set.
4404 * For the other dies, nand_reset() will automatically switch to the
4405 * best mode for us.
4406 */
4407 ret = nand_setup_data_interface(chip, 0);
4408 if (ret)
4409 goto err_nand_init;
4410
4411 nand_maf_id = chip->id.data[0];
4412 nand_dev_id = chip->id.data[1];
4413
4414 chip->select_chip(mtd, -1);
4415
4416 /* Check for a chip array */
4417 for (i = 1; i < maxchips; i++) {
4418 /* See comment in nand_get_flash_type for reset */
4419 nand_reset(chip, i);
4420
4421 chip->select_chip(mtd, i);
4422 /* Send the command for reading device ID */
4423 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4424 /* Read manufacturer and device IDs */
4425 if (nand_maf_id != chip->read_byte(mtd) ||
4426 nand_dev_id != chip->read_byte(mtd)) {
4427 chip->select_chip(mtd, -1);
4428 break;
4429 }
4430 chip->select_chip(mtd, -1);
4431 }
4432 if (i > 1)
4433 pr_info("%d chips detected\n", i);
4434
4435 /* Store the number of chips and calc total size for mtd */
4436 chip->numchips = i;
4437 mtd->size = i * chip->chipsize;
4438
4439 return 0;
4440
4441 err_nand_init:
4442 /* Free manufacturer priv data. */
4443 nand_manufacturer_cleanup(chip);
4444
4445 return ret;
4446 }
4447 EXPORT_SYMBOL(nand_scan_ident);
4448
4449 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4450 {
4451 struct nand_chip *chip = mtd_to_nand(mtd);
4452 struct nand_ecc_ctrl *ecc = &chip->ecc;
4453
4454 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4455 return -EINVAL;
4456
4457 switch (ecc->algo) {
4458 case NAND_ECC_HAMMING:
4459 ecc->calculate = nand_calculate_ecc;
4460 ecc->correct = nand_correct_data;
4461 ecc->read_page = nand_read_page_swecc;
4462 ecc->read_subpage = nand_read_subpage;
4463 ecc->write_page = nand_write_page_swecc;
4464 ecc->read_page_raw = nand_read_page_raw;
4465 ecc->write_page_raw = nand_write_page_raw;
4466 ecc->read_oob = nand_read_oob_std;
4467 ecc->write_oob = nand_write_oob_std;
4468 if (!ecc->size)
4469 ecc->size = 256;
4470 ecc->bytes = 3;
4471 ecc->strength = 1;
4472 return 0;
4473 case NAND_ECC_BCH:
4474 if (!mtd_nand_has_bch()) {
4475 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4476 return -EINVAL;
4477 }
4478 ecc->calculate = nand_bch_calculate_ecc;
4479 ecc->correct = nand_bch_correct_data;
4480 ecc->read_page = nand_read_page_swecc;
4481 ecc->read_subpage = nand_read_subpage;
4482 ecc->write_page = nand_write_page_swecc;
4483 ecc->read_page_raw = nand_read_page_raw;
4484 ecc->write_page_raw = nand_write_page_raw;
4485 ecc->read_oob = nand_read_oob_std;
4486 ecc->write_oob = nand_write_oob_std;
4487
4488 /*
4489 * Board driver should supply ecc.size and ecc.strength
4490 * values to select how many bits are correctable.
4491 * Otherwise, default to 4 bits for large page devices.
4492 */
4493 if (!ecc->size && (mtd->oobsize >= 64)) {
4494 ecc->size = 512;
4495 ecc->strength = 4;
4496 }
4497
4498 /*
4499 * If no ECC placement scheme was provided, pick up the default
4500 * large page one.
4501 */
4502 if (!mtd->ooblayout) {
4503 /* handle large page devices only */
4504 if (mtd->oobsize < 64) {
4505 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4506 return -EINVAL;
4507 }
4508
4509 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4510
4511 }
4512
4513 /*
4514 * We can only maximize ECC config when the default layout is
4515 * used, otherwise we don't know how many bytes can really be
4516 * used.
4517 */
4518 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4519 ecc->options & NAND_ECC_MAXIMIZE) {
4520 int steps, bytes;
4521
4522 /* Always prefer 1k blocks over 512-byte ones */
4523 ecc->size = 1024;
4524 steps = mtd->writesize / ecc->size;
4525
4526 /* Reserve 2 bytes for the BBM */
4527 bytes = (mtd->oobsize - 2) / steps;
4528 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4529 }
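/*
 * Illustrative numbers: with a 4096 byte page and 224 byte OOB, the
 * block above gives steps = 4, bytes = (224 - 2) / 4 = 55 and
 * strength = 55 * 8 / fls(8 * 1024) = 440 / 14 = 31 bits per 1024 bytes.
 */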
4530
4531 /* See nand_bch_init() for details. */
4532 ecc->bytes = 0;
4533 ecc->priv = nand_bch_init(mtd);
4534 if (!ecc->priv) {
4535 WARN(1, "BCH ECC initialization failed!\n");
4536 return -EINVAL;
4537 }
4538 return 0;
4539 default:
4540 WARN(1, "Unsupported ECC algorithm!\n");
4541 return -EINVAL;
4542 }
4543 }
4544
4545 /**
4546 * nand_check_ecc_caps - check the sanity of preset ECC settings
4547 * @chip: nand chip info structure
4548 * @caps: ECC caps info structure
4549 * @oobavail: OOB size that the ECC engine can use
4550 *
4551 * When ECC step size and strength are already set, check if they are supported
4552 * by the controller and the calculated ECC bytes fit within the chip's OOB.
4553 * On success, the calculated number of ECC bytes is set.
4554 */
4555 int nand_check_ecc_caps(struct nand_chip *chip,
4556 const struct nand_ecc_caps *caps, int oobavail)
4557 {
4558 struct mtd_info *mtd = nand_to_mtd(chip);
4559 const struct nand_ecc_step_info *stepinfo;
4560 int preset_step = chip->ecc.size;
4561 int preset_strength = chip->ecc.strength;
4562 int nsteps, ecc_bytes;
4563 int i, j;
4564
4565 if (WARN_ON(oobavail < 0))
4566 return -EINVAL;
4567
4568 if (!preset_step || !preset_strength)
4569 return -ENODATA;
4570
4571 nsteps = mtd->writesize / preset_step;
4572
4573 for (i = 0; i < caps->nstepinfos; i++) {
4574 stepinfo = &caps->stepinfos[i];
4575
4576 if (stepinfo->stepsize != preset_step)
4577 continue;
4578
4579 for (j = 0; j < stepinfo->nstrengths; j++) {
4580 if (stepinfo->strengths[j] != preset_strength)
4581 continue;
4582
4583 ecc_bytes = caps->calc_ecc_bytes(preset_step,
4584 preset_strength);
4585 if (WARN_ON_ONCE(ecc_bytes < 0))
4586 return ecc_bytes;
4587
4588 if (ecc_bytes * nsteps > oobavail) {
4589 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB\n",
4590 preset_step, preset_strength);
4591 return -ENOSPC;
4592 }
4593
4594 chip->ecc.bytes = ecc_bytes;
4595
4596 return 0;
4597 }
4598 }
4599
4600 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller\n",
4601 preset_step, preset_strength);
4602
4603 return -ENOTSUPP;
4604 }
4605 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
4606
4607 /**
4608 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
4609 * @chip: nand chip info structure
4610 * @caps: ECC engine caps info structure
4611 * @oobavail: OOB size that the ECC engine can use
4612 *
4613 * If a chip's ECC requirement is provided, try to meet it with the least
4614 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
4615 * On success, the chosen ECC settings are set.
4616 */
4617 int nand_match_ecc_req(struct nand_chip *chip,
4618 const struct nand_ecc_caps *caps, int oobavail)
4619 {
4620 struct mtd_info *mtd = nand_to_mtd(chip);
4621 const struct nand_ecc_step_info *stepinfo;
4622 int req_step = chip->ecc_step_ds;
4623 int req_strength = chip->ecc_strength_ds;
4624 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
4625 int best_step, best_strength, best_ecc_bytes;
4626 int best_ecc_bytes_total = INT_MAX;
4627 int i, j;
4628
4629 if (WARN_ON(oobavail < 0))
4630 return -EINVAL;
4631
4632 /* No information provided by the NAND chip */
4633 if (!req_step || !req_strength)
4634 return -ENOTSUPP;
4635
4636 /* number of correctable bits the chip requires in a page */
4637 req_corr = mtd->writesize / req_step * req_strength;
4638
4639 for (i = 0; i < caps->nstepinfos; i++) {
4640 stepinfo = &caps->stepinfos[i];
4641 step_size = stepinfo->stepsize;
4642
4643 for (j = 0; j < stepinfo->nstrengths; j++) {
4644 strength = stepinfo->strengths[j];
4645
4646 /*
4647 * If both step size and strength are smaller than the
4648 * chip's requirement, it is not easy to compare the
4649 * resulting reliability.
4650 */
4651 if (step_size < req_step && strength < req_strength)
4652 continue;
4653
4654 if (mtd->writesize % step_size)
4655 continue;
4656
4657 nsteps = mtd->writesize / step_size;
4658
4659 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4660 if (WARN_ON_ONCE(ecc_bytes < 0))
4661 continue;
4662 ecc_bytes_total = ecc_bytes * nsteps;
4663
4664 if (ecc_bytes_total > oobavail ||
4665 strength * nsteps < req_corr)
4666 continue;
4667
4668 /*
4669 * We assume the best is to meet the chip's requirement
4670 * with the least number of ECC bytes.
4671 */
4672 if (ecc_bytes_total < best_ecc_bytes_total) {
4673 best_ecc_bytes_total = ecc_bytes_total;
4674 best_step = step_size;
4675 best_strength = strength;
4676 best_ecc_bytes = ecc_bytes;
4677 }
4678 }
4679 }
4680
4681 if (best_ecc_bytes_total == INT_MAX)
4682 return -ENOTSUPP;
4683
4684 chip->ecc.size = best_step;
4685 chip->ecc.strength = best_strength;
4686 chip->ecc.bytes = best_ecc_bytes;
4687
4688 return 0;
4689 }
4690 EXPORT_SYMBOL_GPL(nand_match_ecc_req);
4691
4692 /**
4693 * nand_maximize_ecc - choose the max ECC strength available
4694 * @chip: nand chip info structure
4695 * @caps: ECC engine caps info structure
4696 * @oobavail: OOB size that the ECC engine can use
4697 *
4698 * Choose the max ECC strength that is supported on the controller, and can fit
4699 * within the chip's OOB. On success, the chosen ECC settings are set.
4700 */
4701 int nand_maximize_ecc(struct nand_chip *chip,
4702 const struct nand_ecc_caps *caps, int oobavail)
4703 {
4704 struct mtd_info *mtd = nand_to_mtd(chip);
4705 const struct nand_ecc_step_info *stepinfo;
4706 int step_size, strength, nsteps, ecc_bytes, corr;
4707 int best_corr = 0;
4708 int best_step = 0;
4709 int best_strength, best_ecc_bytes;
4710 int i, j;
4711
4712 if (WARN_ON(oobavail < 0))
4713 return -EINVAL;
4714
4715 for (i = 0; i < caps->nstepinfos; i++) {
4716 stepinfo = &caps->stepinfos[i];
4717 step_size = stepinfo->stepsize;
4718
4719 /* If chip->ecc.size is already set, respect it */
4720 if (chip->ecc.size && step_size != chip->ecc.size)
4721 continue;
4722
4723 for (j = 0; j < stepinfo->nstrengths; j++) {
4724 strength = stepinfo->strengths[j];
4725
4726 if (mtd->writesize % step_size)
4727 continue;
4728
4729 nsteps = mtd->writesize / step_size;
4730
4731 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4732 if (WARN_ON_ONCE(ecc_bytes < 0))
4733 continue;
4734
4735 if (ecc_bytes * nsteps > oobavail)
4736 continue;
4737
4738 corr = strength * nsteps;
4739
4740 /*
4741 * If the number of correctable bits is the same,
4742 * bigger step_size has more reliability.
4743 */
4744 if (corr > best_corr ||
4745 (corr == best_corr && step_size > best_step)) {
4746 best_corr = corr;
4747 best_step = step_size;
4748 best_strength = strength;
4749 best_ecc_bytes = ecc_bytes;
4750 }
4751 }
4752 }
4753
4754 if (!best_corr)
4755 return -ENOTSUPP;
4756
4757 chip->ecc.size = best_step;
4758 chip->ecc.strength = best_strength;
4759 chip->ecc.bytes = best_ecc_bytes;
4760
4761 return 0;
4762 }
4763 EXPORT_SYMBOL_GPL(nand_maximize_ecc);
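
/*
 * Illustrative controller-driver usage of the three helpers above (not taken
 * from this file), assuming "caps" describes the ECC engine and "oobavail"
 * is the OOB space the engine may use:
 *
 *	if (chip->ecc.size && chip->ecc.strength)
 *		ret = nand_check_ecc_caps(chip, caps, oobavail);
 *	else if (chip->ecc.options & NAND_ECC_MAXIMIZE)
 *		ret = nand_maximize_ecc(chip, caps, oobavail);
 *	else
 *		ret = nand_match_ecc_req(chip, caps, oobavail);
 */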
4764
4765 /*
4766 * Check if the chip configuration meets the datasheet requirements.
4767 *
4768 * If our configuration corrects A bits per B bytes and the minimum
4769 * required correction level is X bits per Y bytes, then we must ensure
4770 * both of the following are true:
4771 *
4772 * (1) A / B >= X / Y
4773 * (2) A >= X
4774 *
4775 * Requirement (1) ensures we can correct for the required bitflip density.
4776 * Requirement (2) ensures we can correct even when all bitflips are clumped
4777 * in the same sector.
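 *
 * For example (illustrative numbers): if the chip requires 4 bits per
 * 512 bytes and our configuration corrects 8 bits per 1024 bytes, then on
 * a 2048 byte page corr = 2048 * 8 / 1024 = 16 >= ds_corr = 2048 * 4 / 512
 * = 16, and 8 >= 4, so the configuration is considered strong enough.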
4778 */
4779 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4780 {
4781 struct nand_chip *chip = mtd_to_nand(mtd);
4782 struct nand_ecc_ctrl *ecc = &chip->ecc;
4783 int corr, ds_corr;
4784
4785 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4786 /* Not enough information */
4787 return true;
4788
4789 /*
4790 * We get the number of corrected bits per page to compare
4791 * the correction density.
4792 */
4793 corr = (mtd->writesize * ecc->strength) / ecc->size;
4794 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4795
4796 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4797 }
4798
4799 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4800 {
4801 struct nand_ecc_ctrl *ecc = &chip->ecc;
4802
4803 if (nand_standard_page_accessors(ecc))
4804 return false;
4805
4806 /*
4807 * When the NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4808 * controller driver implements all the page accessors because
4809 * default helpers are not suitable when the core does not
4810 * send the READ0/PAGEPROG commands.
4811 */
4812 return (!ecc->read_page || !ecc->write_page ||
4813 !ecc->read_page_raw || !ecc->write_page_raw ||
4814 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4815 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4816 ecc->hwctl && ecc->calculate));
4817 }
4818
4819 /**
4820 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4821 * @mtd: MTD device structure
4822 *
4823 * This is the second phase of the normal nand_scan() function. It fills out
4824 * all the uninitialized function pointers with the defaults and scans for a
4825 * bad block table if appropriate.
4826 */
4827 int nand_scan_tail(struct mtd_info *mtd)
4828 {
4829 struct nand_chip *chip = mtd_to_nand(mtd);
4830 struct nand_ecc_ctrl *ecc = &chip->ecc;
4831 struct nand_buffers *nbuf = NULL;
4832 int ret;
4833
4834 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4835 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4836 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4837 ret = -EINVAL;
4838 goto err_ident;
4839 }
4840
4841 if (invalid_ecc_page_accessors(chip)) {
4842 pr_err("Invalid ECC page accessors setup\n");
4843 ret = -EINVAL;
4844 goto err_ident;
4845 }
4846
4847 if (!(chip->options & NAND_OWN_BUFFERS)) {
4848 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4849 if (!nbuf) {
4850 ret = -ENOMEM;
4851 goto err_ident;
4852 }
4853
4854 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4855 if (!nbuf->ecccalc) {
4856 ret = -ENOMEM;
4857 goto err_free;
4858 }
4859
4860 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4861 if (!nbuf->ecccode) {
4862 ret = -ENOMEM;
4863 goto err_free;
4864 }
4865
4866 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4867 GFP_KERNEL);
4868 if (!nbuf->databuf) {
4869 ret = -ENOMEM;
4870 goto err_free;
4871 }
4872
4873 chip->buffers = nbuf;
4874 } else {
4875 if (!chip->buffers) {
4876 ret = -ENOMEM;
4877 goto err_ident;
4878 }
4879 }
4880
4881 /* Set the internal oob buffer location, just after the page data */
4882 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4883
4884 /*
4885 * If no default placement scheme is given, select an appropriate one.
4886 */
4887 if (!mtd->ooblayout &&
4888 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4889 switch (mtd->oobsize) {
4890 case 8:
4891 case 16:
4892 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4893 break;
4894 case 64:
4895 case 128:
4896 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4897 break;
4898 default:
4899 WARN(1, "No oob scheme defined for oobsize %d\n",
4900 mtd->oobsize);
4901 ret = -EINVAL;
4902 goto err_free;
4903 }
4904 }
4905
4906 /*
4907 * Check the ECC mode. If 3 byte/512 byte hardware ECC is selected
4908 * and we have a 256 byte page size, fall back to software ECC.
4909 */
4910
4911 switch (ecc->mode) {
4912 case NAND_ECC_HW_OOB_FIRST:
4913 /* Similar to NAND_ECC_HW, but with a separate read_page handler */
4914 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4915 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4916 ret = -EINVAL;
4917 goto err_free;
4918 }
4919 if (!ecc->read_page)
4920 ecc->read_page = nand_read_page_hwecc_oob_first;
4921
4922 case NAND_ECC_HW:
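/* fall through - the generic NAND_ECC_HW defaults below also apply */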
4923 /* Use standard hwecc read page function? */
4924 if (!ecc->read_page)
4925 ecc->read_page = nand_read_page_hwecc;
4926 if (!ecc->write_page)
4927 ecc->write_page = nand_write_page_hwecc;
4928 if (!ecc->read_page_raw)
4929 ecc->read_page_raw = nand_read_page_raw;
4930 if (!ecc->write_page_raw)
4931 ecc->write_page_raw = nand_write_page_raw;
4932 if (!ecc->read_oob)
4933 ecc->read_oob = nand_read_oob_std;
4934 if (!ecc->write_oob)
4935 ecc->write_oob = nand_write_oob_std;
4936 if (!ecc->read_subpage)
4937 ecc->read_subpage = nand_read_subpage;
4938 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4939 ecc->write_subpage = nand_write_subpage_hwecc;
4940
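/* fall through - the checks in the syndrome case below also apply to NAND_ECC_HW */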
4941 case NAND_ECC_HW_SYNDROME:
4942 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4943 (!ecc->read_page ||
4944 ecc->read_page == nand_read_page_hwecc ||
4945 !ecc->write_page ||
4946 ecc->write_page == nand_write_page_hwecc)) {
4947 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4948 ret = -EINVAL;
4949 goto err_free;
4950 }
4951 /* Use standard syndrome read/write page function? */
4952 if (!ecc->read_page)
4953 ecc->read_page = nand_read_page_syndrome;
4954 if (!ecc->write_page)
4955 ecc->write_page = nand_write_page_syndrome;
4956 if (!ecc->read_page_raw)
4957 ecc->read_page_raw = nand_read_page_raw_syndrome;
4958 if (!ecc->write_page_raw)
4959 ecc->write_page_raw = nand_write_page_raw_syndrome;
4960 if (!ecc->read_oob)
4961 ecc->read_oob = nand_read_oob_syndrome;
4962 if (!ecc->write_oob)
4963 ecc->write_oob = nand_write_oob_syndrome;
4964
4965 if (mtd->writesize >= ecc->size) {
4966 if (!ecc->strength) {
4967 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4968 ret = -EINVAL;
4969 goto err_free;
4970 }
4971 break;
4972 }
4973 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4974 ecc->size, mtd->writesize);
4975 ecc->mode = NAND_ECC_SOFT;
4976 ecc->algo = NAND_ECC_HAMMING;
4977
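/* fall through to set up the software ECC replacement */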
4978 case NAND_ECC_SOFT:
4979 ret = nand_set_ecc_soft_ops(mtd);
4980 if (ret) {
4981 ret = -EINVAL;
4982 goto err_free;
4983 }
4984 break;
4985
4986 case NAND_ECC_ON_DIE:
4987 if (!ecc->read_page || !ecc->write_page) {
4988 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
4989 ret = -EINVAL;
4990 goto err_free;
4991 }
4992 if (!ecc->read_oob)
4993 ecc->read_oob = nand_read_oob_std;
4994 if (!ecc->write_oob)
4995 ecc->write_oob = nand_write_oob_std;
4996 break;
4997
4998 case NAND_ECC_NONE:
4999 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5000 ecc->read_page = nand_read_page_raw;
5001 ecc->write_page = nand_write_page_raw;
5002 ecc->read_oob = nand_read_oob_std;
5003 ecc->read_page_raw = nand_read_page_raw;
5004 ecc->write_page_raw = nand_write_page_raw;
5005 ecc->write_oob = nand_write_oob_std;
5006 ecc->size = mtd->writesize;
5007 ecc->bytes = 0;
5008 ecc->strength = 0;
5009 break;
5010
5011 default:
5012 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
5013 ret = -EINVAL;
5014 goto err_free;
5015 }
5016
5017 /* For many systems, the standard OOB write also works for raw */
5018 if (!ecc->read_oob_raw)
5019 ecc->read_oob_raw = ecc->read_oob;
5020 if (!ecc->write_oob_raw)
5021 ecc->write_oob_raw = ecc->write_oob;
5022
5023 /* propagate ecc info to mtd_info */
5024 mtd->ecc_strength = ecc->strength;
5025 mtd->ecc_step_size = ecc->size;
5026
5027 /*
5028 * Set the number of read / write steps for one page depending on ECC
5029 * mode.
5030 */
5031 ecc->steps = mtd->writesize / ecc->size;
5032 if (ecc->steps * ecc->size != mtd->writesize) {
5033 WARN(1, "Invalid ECC parameters\n");
5034 ret = -EINVAL;
5035 goto err_free;
5036 }
5037 ecc->total = ecc->steps * ecc->bytes;
5038 if (ecc->total > mtd->oobsize) {
5039 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5040 ret = -EINVAL;
5041 goto err_free;
5042 }
5043
5044 /*
5045 * The number of bytes available for a client to place data into
5046 * the out of band area.
5047 */
5048 ret = mtd_ooblayout_count_freebytes(mtd);
5049 if (ret < 0)
5050 ret = 0;
5051
5052 mtd->oobavail = ret;
5053
5054 /* ECC sanity check: warn if it's too weak */
5055 if (!nand_ecc_strength_good(mtd))
5056 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5057 mtd->name);
5058
5059 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5060 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5061 switch (ecc->steps) {
5062 case 2:
5063 mtd->subpage_sft = 1;
5064 break;
5065 case 4:
5066 case 8:
5067 case 16:
5068 mtd->subpage_sft = 2;
5069 break;
5070 }
5071 }
5072 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5073
5074 /* Initialize state */
5075 chip->state = FL_READY;
5076
5077 /* Invalidate the pagebuffer reference */
5078 chip->pagebuf = -1;
5079
5080 /* Large page NAND with SOFT_ECC should support subpage reads */
5081 switch (ecc->mode) {
5082 case NAND_ECC_SOFT:
5083 if (chip->page_shift > 9)
5084 chip->options |= NAND_SUBPAGE_READ;
5085 break;
5086
5087 default:
5088 break;
5089 }
5090
5091 /* Fill in remaining MTD driver data */
5092 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
5093 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
5094 MTD_CAP_NANDFLASH;
5095 mtd->_erase = nand_erase;
5096 mtd->_point = NULL;
5097 mtd->_unpoint = NULL;
5098 mtd->_read = nand_read;
5099 mtd->_write = nand_write;
5100 mtd->_panic_write = panic_nand_write;
5101 mtd->_read_oob = nand_read_oob;
5102 mtd->_write_oob = nand_write_oob;
5103 mtd->_sync = nand_sync;
5104 mtd->_lock = NULL;
5105 mtd->_unlock = NULL;
5106 mtd->_suspend = nand_suspend;
5107 mtd->_resume = nand_resume;
5108 mtd->_reboot = nand_shutdown;
5109 mtd->_block_isreserved = nand_block_isreserved;
5110 mtd->_block_isbad = nand_block_isbad;
5111 mtd->_block_markbad = nand_block_markbad;
5112 mtd->_max_bad_blocks = nand_max_bad_blocks;
5113 mtd->writebufsize = mtd->writesize;
5114
5115 /*
5116 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
5117 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5118 * properly set.
5119 */
5120 if (!mtd->bitflip_threshold)
5121 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5122
5123 /* Check, if we should skip the bad block table scan */
5124 if (chip->options & NAND_SKIP_BBTSCAN)
5125 return 0;
5126
5127 /* Build bad block table */
5128 ret = chip->scan_bbt(mtd);
5129 if (ret)
5130 goto err_free;
5131 return 0;
5132
5133 err_free:
5134 if (nbuf) {
5135 kfree(nbuf->databuf);
5136 kfree(nbuf->ecccode);
5137 kfree(nbuf->ecccalc);
5138 kfree(nbuf);
5139 }
5140
5141 err_ident:
5142 /* Clean up nand_scan_ident(). */
5143
5144 /* Free manufacturer priv data. */
5145 nand_manufacturer_cleanup(chip);
5146
5147 return ret;
5148 }
5149 EXPORT_SYMBOL(nand_scan_tail);
5150
5151 /*
5152 * is_module_text_address() isn't exported, and it's mostly a pointless
5153 * test if this is a module _anyway_ -- they'd have to try _really_ hard
5154 * to call us from in-kernel code if the core NAND support is modular.
5155 */
5156 #ifdef MODULE
5157 #define caller_is_module() (1)
5158 #else
5159 #define caller_is_module() \
5160 is_module_text_address((unsigned long)__builtin_return_address(0))
5161 #endif
5162
5163 /**
5164 * nand_scan - [NAND Interface] Scan for the NAND device
5165 * @mtd: MTD device structure
5166 * @maxchips: number of chips to scan for
5167 *
5168 * This fills out all the uninitialized function pointers with the defaults.
5169 * The flash ID is read and the mtd/chip structures are filled with the
5170 * appropriate values.
5171 */
5172 int nand_scan(struct mtd_info *mtd, int maxchips)
5173 {
5174 int ret;
5175
5176 ret = nand_scan_ident(mtd, maxchips, NULL);
5177 if (!ret)
5178 ret = nand_scan_tail(mtd);
5179 return ret;
5180 }
5181 EXPORT_SYMBOL(nand_scan);
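
/*
 * Illustrative two-phase usage from a controller driver that needs to tweak
 * chip->ecc between identification and the final setup (hypothetical
 * snippet, error handling trimmed):
 *
 *	ret = nand_scan_ident(mtd, 1, NULL);
 *	if (ret)
 *		return ret;
 *	... configure chip->ecc from the detected parameters ...
 *	ret = nand_scan_tail(mtd);
 */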
5182
5183 /**
5184 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5185 * @chip: NAND chip object
5186 */
5187 void nand_cleanup(struct nand_chip *chip)
5188 {
5189 if (chip->ecc.mode == NAND_ECC_SOFT &&
5190 chip->ecc.algo == NAND_ECC_BCH)
5191 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5192
5193 nand_release_data_interface(chip);
5194
5195 /* Free bad block table memory */
5196 kfree(chip->bbt);
5197 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
5198 kfree(chip->buffers->databuf);
5199 kfree(chip->buffers->ecccode);
5200 kfree(chip->buffers->ecccalc);
5201 kfree(chip->buffers);
5202 }
5203
5204 /* Free bad block descriptor memory */
5205 if (chip->badblock_pattern && chip->badblock_pattern->options
5206 & NAND_BBT_DYNAMICSTRUCT)
5207 kfree(chip->badblock_pattern);
5208
5209 /* Free manufacturer priv data. */
5210 nand_manufacturer_cleanup(chip);
5211 }
5212 EXPORT_SYMBOL_GPL(nand_cleanup);
5213
5214 /**
5215 * nand_release - [NAND Interface] Unregister the MTD device and free resources
5216 * held by the NAND device
5217 * @mtd: MTD device structure
5218 */
5219 void nand_release(struct mtd_info *mtd)
5220 {
5221 mtd_device_unregister(mtd);
5222 nand_cleanup(mtd_to_nand(mtd));
5223 }
5224 EXPORT_SYMBOL_GPL(nand_release);
5225
5226 MODULE_LICENSE("GPL");
5227 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5228 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5229 MODULE_DESCRIPTION("Generic NAND flash driver code");