1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check whether mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * The BBT is not serialized, has to be fixed
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/nand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
52
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
55
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59 {
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
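/*
 * ECC bytes live at OOB offsets 0-3 and from offset 6 onwards; bytes 4-5
 * are left out so the bad block marker area of small-page devices stays
 * untouched.
 */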
66 if (!section) {
67 oobregion->offset = 0;
68 oobregion->length = 4;
69 } else {
70 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4;
72 }
73
74 return 0;
75 }
76
77 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
78 struct mtd_oob_region *oobregion)
79 {
80 if (section > 1)
81 return -ERANGE;
82
83 if (mtd->oobsize == 16) {
84 if (section)
85 return -ERANGE;
86
87 oobregion->length = 8;
88 oobregion->offset = 8;
89 } else {
90 oobregion->length = 2;
91 if (!section)
92 oobregion->offset = 3;
93 else
94 oobregion->offset = 6;
95 }
96
97 return 0;
98 }
99
100 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
101 .ecc = nand_ooblayout_ecc_sp,
102 .free = nand_ooblayout_free_sp,
103 };
104 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
105
106 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
107 struct mtd_oob_region *oobregion)
108 {
109 struct nand_chip *chip = mtd_to_nand(mtd);
110 struct nand_ecc_ctrl *ecc = &chip->ecc;
111
112 if (section)
113 return -ERANGE;
114
115 oobregion->length = ecc->total;
116 oobregion->offset = mtd->oobsize - oobregion->length;
117
118 return 0;
119 }
120
121 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
122 struct mtd_oob_region *oobregion)
123 {
124 struct nand_chip *chip = mtd_to_nand(mtd);
125 struct nand_ecc_ctrl *ecc = &chip->ecc;
126
127 if (section)
128 return -ERANGE;
129
130 oobregion->length = mtd->oobsize - ecc->total - 2;
131 oobregion->offset = 2;
132
133 return 0;
134 }
135
136 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
137 .ecc = nand_ooblayout_ecc_lp,
138 .free = nand_ooblayout_free_lp,
139 };
140 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
141
142 /*
143 * Support the old "large page" layout used for 1-bit Hamming ECC, where the
144 * ECC bytes are placed at a fixed offset.
145 */
146 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
147 struct mtd_oob_region *oobregion)
148 {
149 struct nand_chip *chip = mtd_to_nand(mtd);
150 struct nand_ecc_ctrl *ecc = &chip->ecc;
151
152 if (section)
153 return -ERANGE;
154
155 switch (mtd->oobsize) {
156 case 64:
157 oobregion->offset = 40;
158 break;
159 case 128:
160 oobregion->offset = 80;
161 break;
162 default:
163 return -EINVAL;
164 }
165
166 oobregion->length = ecc->total;
167 if (oobregion->offset + oobregion->length > mtd->oobsize)
168 return -ERANGE;
169
170 return 0;
171 }
172
173 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
174 struct mtd_oob_region *oobregion)
175 {
176 struct nand_chip *chip = mtd_to_nand(mtd);
177 struct nand_ecc_ctrl *ecc = &chip->ecc;
178 int ecc_offset = 0;
179
180 if (section < 0 || section > 1)
181 return -ERANGE;
182
183 switch (mtd->oobsize) {
184 case 64:
185 ecc_offset = 40;
186 break;
187 case 128:
188 ecc_offset = 80;
189 break;
190 default:
191 return -EINVAL;
192 }
193
194 if (section == 0) {
195 oobregion->offset = 2;
196 oobregion->length = ecc_offset - 2;
197 } else {
198 oobregion->offset = ecc_offset + ecc->total;
199 oobregion->length = mtd->oobsize - oobregion->offset;
200 }
201
202 return 0;
203 }
204
205 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
206 .ecc = nand_ooblayout_ecc_lp_hamming,
207 .free = nand_ooblayout_free_lp_hamming,
208 };
209
210 static int check_offs_len(struct mtd_info *mtd,
211 loff_t ofs, uint64_t len)
212 {
213 struct nand_chip *chip = mtd_to_nand(mtd);
214 int ret = 0;
215
216 /* Start address must align on block boundary */
217 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 pr_debug("%s: unaligned address\n", __func__);
219 ret = -EINVAL;
220 }
221
222 /* Length must align on block boundary */
223 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: length not block aligned\n", __func__);
225 ret = -EINVAL;
226 }
227
228 return ret;
229 }
230
231 /**
232 * nand_release_device - [GENERIC] release chip
233 * @mtd: MTD device structure
234 *
235 * Release chip lock and wake up anyone waiting on the device.
236 */
237 static void nand_release_device(struct mtd_info *mtd)
238 {
239 struct nand_chip *chip = mtd_to_nand(mtd);
240
241 /* Release the controller and the chip */
242 spin_lock(&chip->controller->lock);
243 chip->controller->active = NULL;
244 chip->state = FL_READY;
245 wake_up(&chip->controller->wq);
246 spin_unlock(&chip->controller->lock);
247 }
248
249 /**
250 * nand_read_byte - [DEFAULT] read one byte from the chip
251 * @mtd: MTD device structure
252 *
253 * Default read function for 8bit buswidth
254 */
255 static uint8_t nand_read_byte(struct mtd_info *mtd)
256 {
257 struct nand_chip *chip = mtd_to_nand(mtd);
258 return readb(chip->IO_ADDR_R);
259 }
260
261 /**
262 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
263 * @mtd: MTD device structure
264 *
265 * Default read function for 16bit buswidth with endianness conversion.
266 *
267 */
268 static uint8_t nand_read_byte16(struct mtd_info *mtd)
269 {
270 struct nand_chip *chip = mtd_to_nand(mtd);
271 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
272 }
273
274 /**
275 * nand_read_word - [DEFAULT] read one word from the chip
276 * @mtd: MTD device structure
277 *
278 * Default read function for 16bit buswidth without endianness conversion.
279 */
280 static u16 nand_read_word(struct mtd_info *mtd)
281 {
282 struct nand_chip *chip = mtd_to_nand(mtd);
283 return readw(chip->IO_ADDR_R);
284 }
285
286 /**
287 * nand_select_chip - [DEFAULT] control CE line
288 * @mtd: MTD device structure
289 * @chipnr: chipnumber to select, -1 for deselect
290 *
291 * Default select function for 1 chip devices.
292 */
293 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
294 {
295 struct nand_chip *chip = mtd_to_nand(mtd);
296
297 switch (chipnr) {
298 case -1:
299 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
300 break;
301 case 0:
302 break;
303
304 default:
305 BUG();
306 }
307 }
308
309 /**
310 * nand_write_byte - [DEFAULT] write single byte to chip
311 * @mtd: MTD device structure
312 * @byte: value to write
313 *
314 * Default function to write a byte to I/O[7:0]
315 */
316 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
317 {
318 struct nand_chip *chip = mtd_to_nand(mtd);
319
320 chip->write_buf(mtd, &byte, 1);
321 }
322
323 /**
324 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
325 * @mtd: MTD device structure
326 * @byte: value to write
327 *
328 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
329 */
330 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
331 {
332 struct nand_chip *chip = mtd_to_nand(mtd);
333 uint16_t word = byte;
334
335 /*
336 * It's not entirely clear what should happen to I/O[15:8] when writing
337 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
338 *
339 * When the host supports a 16-bit bus width, only data is
340 * transferred at the 16-bit width. All address and command line
341 * transfers shall use only the lower 8-bits of the data bus. During
342 * command transfers, the host may place any value on the upper
343 * 8-bits of the data bus. During address transfers, the host shall
344 * set the upper 8-bits of the data bus to 00h.
345 *
346 * One user of the write_byte callback is nand_onfi_set_features. The
347 * four parameters are specified to be written to I/O[7:0], but this is
348 * neither an address nor a command transfer. Let's assume a 0 on the
349 * upper I/O lines is OK.
350 */
351 chip->write_buf(mtd, (uint8_t *)&word, 2);
352 }
353
354 /**
355 * nand_write_buf - [DEFAULT] write buffer to chip
356 * @mtd: MTD device structure
357 * @buf: data buffer
358 * @len: number of bytes to write
359 *
360 * Default write function for 8bit buswidth.
361 */
362 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
363 {
364 struct nand_chip *chip = mtd_to_nand(mtd);
365
366 iowrite8_rep(chip->IO_ADDR_W, buf, len);
367 }
368
369 /**
370 * nand_read_buf - [DEFAULT] read chip data into buffer
371 * @mtd: MTD device structure
372 * @buf: buffer to store data
373 * @len: number of bytes to read
374 *
375 * Default read function for 8bit buswidth.
376 */
377 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
378 {
379 struct nand_chip *chip = mtd_to_nand(mtd);
380
381 ioread8_rep(chip->IO_ADDR_R, buf, len);
382 }
383
384 /**
385 * nand_write_buf16 - [DEFAULT] write buffer to chip
386 * @mtd: MTD device structure
387 * @buf: data buffer
388 * @len: number of bytes to write
389 *
390 * Default write function for 16bit buswidth.
391 */
392 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
393 {
394 struct nand_chip *chip = mtd_to_nand(mtd);
395 u16 *p = (u16 *) buf;
396
397 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
398 }
399
400 /**
401 * nand_read_buf16 - [DEFAULT] read chip data into buffer
402 * @mtd: MTD device structure
403 * @buf: buffer to store data
404 * @len: number of bytes to read
405 *
406 * Default read function for 16bit buswidth.
407 */
408 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
409 {
410 struct nand_chip *chip = mtd_to_nand(mtd);
411 u16 *p = (u16 *) buf;
412
413 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
414 }
415
416 /**
417 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
418 * @mtd: MTD device structure
419 * @ofs: offset from device start
420 *
421 * Check if the block is bad.
422 */
423 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
424 {
425 int page, page_end, res;
426 struct nand_chip *chip = mtd_to_nand(mtd);
427 u8 bad;
428
429 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
430 ofs += mtd->erasesize - mtd->writesize;
431
432 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
433 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
434
435 for (; page < page_end; page++) {
436 res = chip->ecc.read_oob(mtd, chip, page);
437 if (res)
438 return res;
439
440 bad = chip->oob_poi[chip->badblockpos];
441
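/*
 * With badblockbits == 8 any marker byte other than 0xFF means the
 * block is bad; otherwise count the bits still set and treat the
 * block as bad when fewer than badblockbits remain.
 */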
442 if (likely(chip->badblockbits == 8))
443 res = bad != 0xFF;
444 else
445 res = hweight8(bad) < chip->badblockbits;
446 if (res)
447 return res;
448 }
449
450 return 0;
451 }
452
453 /**
454 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
455 * @mtd: MTD device structure
456 * @ofs: offset from device start
457 *
458 * This is the default implementation, which can be overridden by a hardware
459 * specific driver. It provides the details for writing a bad block marker to a
460 * block.
461 */
462 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
463 {
464 struct nand_chip *chip = mtd_to_nand(mtd);
465 struct mtd_oob_ops ops;
466 uint8_t buf[2] = { 0, 0 };
467 int ret = 0, res, i = 0;
468
469 memset(&ops, 0, sizeof(ops));
470 ops.oobbuf = buf;
471 ops.ooboffs = chip->badblockpos;
472 if (chip->options & NAND_BUSWIDTH_16) {
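/* On 16-bit devices the marker must start at an even OOB offset and cover a full word */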
473 ops.ooboffs &= ~0x01;
474 ops.len = ops.ooblen = 2;
475 } else {
476 ops.len = ops.ooblen = 1;
477 }
478 ops.mode = MTD_OPS_PLACE_OOB;
479
480 /* Write to first/last page(s) if necessary */
481 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
482 ofs += mtd->erasesize - mtd->writesize;
483 do {
484 res = nand_do_write_oob(mtd, ofs, &ops);
485 if (!ret)
486 ret = res;
487
488 i++;
489 ofs += mtd->writesize;
490 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
491
492 return ret;
493 }
494
495 /**
496 * nand_block_markbad_lowlevel - mark a block bad
497 * @mtd: MTD device structure
498 * @ofs: offset from device start
499 *
500 * This function performs the generic NAND bad block marking steps (i.e., bad
501 * block table(s) and/or marker(s)). We only allow the hardware driver to
502 * specify how to write bad block markers to OOB (chip->block_markbad).
503 *
504 * We try operations in the following order:
505 *
506 * (1) erase the affected block, to allow OOB marker to be written cleanly
507 * (2) write bad block marker to OOB area of affected block (unless flag
508 * NAND_BBT_NO_OOB_BBM is present)
509 * (3) update the BBT
510 *
511 * Note that we retain the first error encountered in (2) or (3), finish the
512 * procedures, and dump the error in the end.
513 */
514 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
515 {
516 struct nand_chip *chip = mtd_to_nand(mtd);
517 int res, ret = 0;
518
519 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
520 struct erase_info einfo;
521
522 /* Attempt erase before marking OOB */
523 memset(&einfo, 0, sizeof(einfo));
524 einfo.mtd = mtd;
525 einfo.addr = ofs;
526 einfo.len = 1ULL << chip->phys_erase_shift;
527 nand_erase_nand(mtd, &einfo, 0);
528
529 /* Write bad block marker to OOB */
530 nand_get_device(mtd, FL_WRITING);
531 ret = chip->block_markbad(mtd, ofs);
532 nand_release_device(mtd);
533 }
534
535 /* Mark block bad in BBT */
536 if (chip->bbt) {
537 res = nand_markbad_bbt(mtd, ofs);
538 if (!ret)
539 ret = res;
540 }
541
542 if (!ret)
543 mtd->ecc_stats.badblocks++;
544
545 return ret;
546 }
547
548 /**
549 * nand_check_wp - [GENERIC] check if the chip is write protected
550 * @mtd: MTD device structure
551 *
552 * Check if the device is write protected. The function expects that the
553 * device is already selected.
554 */
555 static int nand_check_wp(struct mtd_info *mtd)
556 {
557 struct nand_chip *chip = mtd_to_nand(mtd);
558
559 /* Broken xD cards report WP despite being writable */
560 if (chip->options & NAND_BROKEN_XD)
561 return 0;
562
563 /* Check the WP bit */
564 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
565 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
566 }
567
568 /**
569 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
570 * @mtd: MTD device structure
571 * @ofs: offset from device start
572 *
573 * Check if the block is marked as reserved.
574 */
575 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
576 {
577 struct nand_chip *chip = mtd_to_nand(mtd);
578
579 if (!chip->bbt)
580 return 0;
581 /* Return info from the table */
582 return nand_isreserved_bbt(mtd, ofs);
583 }
584
585 /**
586 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
587 * @mtd: MTD device structure
588 * @ofs: offset from device start
589 * @allowbbt: 1 if it is allowed to access the BBT area
590 *
591 * Check if the block is bad, either by reading the bad block table or by
592 * calling the scan function.
593 */
594 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
595 {
596 struct nand_chip *chip = mtd_to_nand(mtd);
597
598 if (!chip->bbt)
599 return chip->block_bad(mtd, ofs);
600
601 /* Return info from the table */
602 return nand_isbad_bbt(mtd, ofs, allowbbt);
603 }
604
605 /**
606 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
607 * @mtd: MTD device structure
608 * @timeo: Timeout
609 *
610 * Helper function for nand_wait_ready used when needing to wait in interrupt
611 * context.
612 */
613 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
614 {
615 struct nand_chip *chip = mtd_to_nand(mtd);
616 int i;
617
618 /* Wait for the device to get ready */
619 for (i = 0; i < timeo; i++) {
620 if (chip->dev_ready(mtd))
621 break;
622 touch_softlockup_watchdog();
623 mdelay(1);
624 }
625 }
626
627 /**
628 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
629 * @mtd: MTD device structure
630 *
631 * Wait for the ready pin after a command, and warn if a timeout occurs.
632 */
633 void nand_wait_ready(struct mtd_info *mtd)
634 {
635 struct nand_chip *chip = mtd_to_nand(mtd);
636 unsigned long timeo = 400;
637
638 if (in_interrupt() || oops_in_progress)
639 return panic_nand_wait_ready(mtd, timeo);
640
641 /* Wait until command is processed or timeout occurs */
642 timeo = jiffies + msecs_to_jiffies(timeo);
643 do {
644 if (chip->dev_ready(mtd))
645 return;
646 cond_resched();
647 } while (time_before(jiffies, timeo));
648
649 if (!chip->dev_ready(mtd))
650 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
651 }
652 EXPORT_SYMBOL_GPL(nand_wait_ready);
653
654 /**
655 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
656 * @mtd: MTD device structure
657 * @timeo: Timeout in ms
658 *
659 * Wait for status ready (i.e. command done) or timeout.
660 */
661 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
662 {
663 register struct nand_chip *chip = mtd_to_nand(mtd);
664
665 timeo = jiffies + msecs_to_jiffies(timeo);
666 do {
667 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
668 break;
669 touch_softlockup_watchdog();
670 } while (time_before(jiffies, timeo));
671 }
672
673 /**
674 * nand_command - [DEFAULT] Send command to NAND device
675 * @mtd: MTD device structure
676 * @command: the command to be sent
677 * @column: the column address for this command, -1 if none
678 * @page_addr: the page address for this command, -1 if none
679 *
680 * Send command to NAND device. This function is used for small page devices
681 * (512 Bytes per page).
682 */
683 static void nand_command(struct mtd_info *mtd, unsigned int command,
684 int column, int page_addr)
685 {
686 register struct nand_chip *chip = mtd_to_nand(mtd);
687 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
688
689 /* Write out the command to the device */
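/*
 * Small-page chips need a pointer command (READ0, READ1 or READOOB)
 * before SEQIN to select which region of the page will be written.
 */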
690 if (command == NAND_CMD_SEQIN) {
691 int readcmd;
692
693 if (column >= mtd->writesize) {
694 /* OOB area */
695 column -= mtd->writesize;
696 readcmd = NAND_CMD_READOOB;
697 } else if (column < 256) {
698 /* First 256 bytes --> READ0 */
699 readcmd = NAND_CMD_READ0;
700 } else {
701 column -= 256;
702 readcmd = NAND_CMD_READ1;
703 }
704 chip->cmd_ctrl(mtd, readcmd, ctrl);
705 ctrl &= ~NAND_CTRL_CHANGE;
706 }
707 chip->cmd_ctrl(mtd, command, ctrl);
708
709 /* Address cycle, when necessary */
710 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
711 /* Serially input address */
712 if (column != -1) {
713 /* Adjust columns for 16 bit buswidth */
714 if (chip->options & NAND_BUSWIDTH_16 &&
715 !nand_opcode_8bits(command))
716 column >>= 1;
717 chip->cmd_ctrl(mtd, column, ctrl);
718 ctrl &= ~NAND_CTRL_CHANGE;
719 }
720 if (page_addr != -1) {
721 chip->cmd_ctrl(mtd, page_addr, ctrl);
722 ctrl &= ~NAND_CTRL_CHANGE;
723 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
724 /* One more address cycle for devices > 32MiB */
725 if (chip->chipsize > (32 << 20))
726 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
727 }
728 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
729
730 /*
731 * Program and erase have their own busy handlers; status and sequential
732 * in need no delay.
733 */
734 switch (command) {
735
736 case NAND_CMD_PAGEPROG:
737 case NAND_CMD_ERASE1:
738 case NAND_CMD_ERASE2:
739 case NAND_CMD_SEQIN:
740 case NAND_CMD_STATUS:
741 case NAND_CMD_READID:
742 case NAND_CMD_SET_FEATURES:
743 return;
744
745 case NAND_CMD_RESET:
746 if (chip->dev_ready)
747 break;
748 udelay(chip->chip_delay);
749 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
750 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
751 chip->cmd_ctrl(mtd,
752 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
753 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
754 nand_wait_status_ready(mtd, 250);
755 return;
756
757 /* This applies to read commands */
758 default:
759 /*
760 * If we don't have access to the busy pin, we apply the given
761 * command delay
762 */
763 if (!chip->dev_ready) {
764 udelay(chip->chip_delay);
765 return;
766 }
767 }
768 /*
769 * Apply this short delay always to ensure that we do wait tWB in
770 * any case on any machine.
771 */
772 ndelay(100);
773
774 nand_wait_ready(mtd);
775 }
776
777 static void nand_ccs_delay(struct nand_chip *chip)
778 {
779 /*
780 * The controller already takes care of waiting for tCCS when the RNDIN
781 * or RNDOUT command is sent, return directly.
782 */
783 if (!(chip->options & NAND_WAIT_TCCS))
784 return;
785
786 /*
787 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
788 * (which should be safe for all NANDs).
789 */
790 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
791 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
792 else
793 ndelay(500);
794 }
795
796 /**
797 * nand_command_lp - [DEFAULT] Send command to NAND large page device
798 * @mtd: MTD device structure
799 * @command: the command to be sent
800 * @column: the column address for this command, -1 if none
801 * @page_addr: the page address for this command, -1 if none
802 *
803 * Send command to NAND device. This is the version for the new large page
804 * devices. We don't have the separate regions as we have in the small page
805 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
806 */
807 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
808 int column, int page_addr)
809 {
810 register struct nand_chip *chip = mtd_to_nand(mtd);
811
812 /* Emulate NAND_CMD_READOOB */
813 if (command == NAND_CMD_READOOB) {
814 column += mtd->writesize;
815 command = NAND_CMD_READ0;
816 }
817
818 /* Command latch cycle */
819 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
820
821 if (column != -1 || page_addr != -1) {
822 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
823
824 /* Serially input address */
825 if (column != -1) {
826 /* Adjust columns for 16 bit buswidth */
827 if (chip->options & NAND_BUSWIDTH_16 &&
828 !nand_opcode_8bits(command))
829 column >>= 1;
830 chip->cmd_ctrl(mtd, column, ctrl);
831 ctrl &= ~NAND_CTRL_CHANGE;
832
833 /* Only output a single addr cycle for 8bits opcodes. */
834 if (!nand_opcode_8bits(command))
835 chip->cmd_ctrl(mtd, column >> 8, ctrl);
836 }
837 if (page_addr != -1) {
838 chip->cmd_ctrl(mtd, page_addr, ctrl);
839 chip->cmd_ctrl(mtd, page_addr >> 8,
840 NAND_NCE | NAND_ALE);
841 /* One more address cycle for devices > 128MiB */
842 if (chip->chipsize > (128 << 20))
843 chip->cmd_ctrl(mtd, page_addr >> 16,
844 NAND_NCE | NAND_ALE);
845 }
846 }
847 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
848
849 /*
850 * Program and erase have their own busy handlers; status and
851 * sequential in need no delay.
852 */
853 switch (command) {
854
855 case NAND_CMD_CACHEDPROG:
856 case NAND_CMD_PAGEPROG:
857 case NAND_CMD_ERASE1:
858 case NAND_CMD_ERASE2:
859 case NAND_CMD_SEQIN:
860 case NAND_CMD_STATUS:
861 case NAND_CMD_READID:
862 case NAND_CMD_SET_FEATURES:
863 return;
864
865 case NAND_CMD_RNDIN:
866 nand_ccs_delay(chip);
867 return;
868
869 case NAND_CMD_RESET:
870 if (chip->dev_ready)
871 break;
872 udelay(chip->chip_delay);
873 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
874 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
875 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
876 NAND_NCE | NAND_CTRL_CHANGE);
877 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
878 nand_wait_status_ready(mtd, 250);
879 return;
880
881 case NAND_CMD_RNDOUT:
882 /* No ready / busy check necessary */
883 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
884 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
885 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
886 NAND_NCE | NAND_CTRL_CHANGE);
887
888 nand_ccs_delay(chip);
889 return;
890
891 case NAND_CMD_READ0:
892 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
893 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
894 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
895 NAND_NCE | NAND_CTRL_CHANGE);
896
897 /* This applies to read commands */
898 default:
899 /*
900 * If we don't have access to the busy pin, we apply the given
901 * command delay.
902 */
903 if (!chip->dev_ready) {
904 udelay(chip->chip_delay);
905 return;
906 }
907 }
908
909 /*
910 * Apply this short delay always to ensure that we do wait tWB in
911 * any case on any machine.
912 */
913 ndelay(100);
914
915 nand_wait_ready(mtd);
916 }
917
918 /**
919 * panic_nand_get_device - [GENERIC] Get chip for selected access
920 * @chip: the nand chip descriptor
921 * @mtd: MTD device structure
922 * @new_state: the state which is requested
923 *
924 * Used when in panic, no locks are taken.
925 */
926 static void panic_nand_get_device(struct nand_chip *chip,
927 struct mtd_info *mtd, int new_state)
928 {
929 /* Hardware controller shared among independent devices */
930 chip->controller->active = chip;
931 chip->state = new_state;
932 }
933
934 /**
935 * nand_get_device - [GENERIC] Get chip for selected access
936 * @mtd: MTD device structure
937 * @new_state: the state which is requested
938 *
939 * Get the device and lock it for exclusive access
940 */
941 static int
942 nand_get_device(struct mtd_info *mtd, int new_state)
943 {
944 struct nand_chip *chip = mtd_to_nand(mtd);
945 spinlock_t *lock = &chip->controller->lock;
946 wait_queue_head_t *wq = &chip->controller->wq;
947 DECLARE_WAITQUEUE(wait, current);
948 retry:
949 spin_lock(lock);
950
951 /* Hardware controller shared among independent devices */
952 if (!chip->controller->active)
953 chip->controller->active = chip;
954
955 if (chip->controller->active == chip && chip->state == FL_READY) {
956 chip->state = new_state;
957 spin_unlock(lock);
958 return 0;
959 }
960 if (new_state == FL_PM_SUSPENDED) {
961 if (chip->controller->active->state == FL_PM_SUSPENDED) {
962 chip->state = FL_PM_SUSPENDED;
963 spin_unlock(lock);
964 return 0;
965 }
966 }
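/* Controller is busy: sleep on the controller wait queue and retry once woken up */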
967 set_current_state(TASK_UNINTERRUPTIBLE);
968 add_wait_queue(wq, &wait);
969 spin_unlock(lock);
970 schedule();
971 remove_wait_queue(wq, &wait);
972 goto retry;
973 }
974
975 /**
976 * panic_nand_wait - [GENERIC] wait until the command is done
977 * @mtd: MTD device structure
978 * @chip: NAND chip structure
979 * @timeo: timeout
980 *
981 * Wait for command done. This is a helper function for nand_wait used when
982 * we are in interrupt context. May happen when in panic and trying to write
983 * an oops through mtdoops.
984 */
985 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
986 unsigned long timeo)
987 {
988 int i;
989 for (i = 0; i < timeo; i++) {
990 if (chip->dev_ready) {
991 if (chip->dev_ready(mtd))
992 break;
993 } else {
994 if (chip->read_byte(mtd) & NAND_STATUS_READY)
995 break;
996 }
997 mdelay(1);
998 }
999 }
1000
1001 /**
1002 * nand_wait - [DEFAULT] wait until the command is done
1003 * @mtd: MTD device structure
1004 * @chip: NAND chip structure
1005 *
1006 * Wait for command done. This applies to erase and program only.
1007 */
1008 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1009 {
1010
1011 int status;
1012 unsigned long timeo = 400;
1013
1014 /*
1015 * Apply this short delay always to ensure that we do wait tWB in any
1016 * case on any machine.
1017 */
1018 ndelay(100);
1019
1020 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1021
1022 if (in_interrupt() || oops_in_progress)
1023 panic_nand_wait(mtd, chip, timeo);
1024 else {
1025 timeo = jiffies + msecs_to_jiffies(timeo);
1026 do {
1027 if (chip->dev_ready) {
1028 if (chip->dev_ready(mtd))
1029 break;
1030 } else {
1031 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1032 break;
1033 }
1034 cond_resched();
1035 } while (time_before(jiffies, timeo));
1036 }
1037
1038 status = (int)chip->read_byte(mtd);
1039 /* This can happen in case of a timeout or a buggy dev_ready */
1040 WARN_ON(!(status & NAND_STATUS_READY));
1041 return status;
1042 }
1043
1044 /**
1045 * nand_reset_data_interface - Reset data interface and timings
1046 * @chip: The NAND chip
1047 *
1048 * Reset the Data interface and timings to ONFI mode 0.
1049 *
1050 * Returns 0 for success or negative error code otherwise.
1051 */
1052 static int nand_reset_data_interface(struct nand_chip *chip)
1053 {
1054 struct mtd_info *mtd = nand_to_mtd(chip);
1055 const struct nand_data_interface *conf;
1056 int ret;
1057
1058 if (!chip->setup_data_interface)
1059 return 0;
1060
1061 /*
1062 * The ONFI specification says:
1063 * "
1064 * To transition from NV-DDR or NV-DDR2 to the SDR data
1065 * interface, the host shall use the Reset (FFh) command
1066 * using SDR timing mode 0. A device in any timing mode is
1067 * required to recognize Reset (FFh) command issued in SDR
1068 * timing mode 0.
1069 * "
1070 *
1071 * Configure the data interface in SDR mode and set the
1072 * timings to timing mode 0.
1073 */
1074
1075 conf = nand_get_default_data_interface();
1076 ret = chip->setup_data_interface(mtd, conf, false);
1077 if (ret)
1078 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1079
1080 return ret;
1081 }
1082
1083 /**
1084 * nand_setup_data_interface - Setup the best data interface and timings
1085 * @chip: The NAND chip
1086 *
1087 * Find and configure the best data interface and NAND timings supported by
1088 * the chip and the driver.
1089 * First tries to retrieve supported timing modes from ONFI information,
1090 * and if the NAND chip does not support ONFI, relies on the
1091 * ->onfi_timing_mode_default specified in the nand_ids table.
1092 *
1093 * Returns 0 for success or negative error code otherwise.
1094 */
1095 static int nand_setup_data_interface(struct nand_chip *chip)
1096 {
1097 struct mtd_info *mtd = nand_to_mtd(chip);
1098 int ret;
1099
1100 if (!chip->setup_data_interface || !chip->data_interface)
1101 return 0;
1102
1103 /*
1104 * Ensure the timing mode has been changed on the chip side
1105 * before changing timings on the controller side.
1106 */
1107 if (chip->onfi_version) {
1108 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1109 chip->onfi_timing_mode_default,
1110 };
1111
1112 ret = chip->onfi_set_features(mtd, chip,
1113 ONFI_FEATURE_ADDR_TIMING_MODE,
1114 tmode_param);
1115 if (ret)
1116 goto err;
1117 }
1118
1119 ret = chip->setup_data_interface(mtd, chip->data_interface, false);
1120 err:
1121 return ret;
1122 }
1123
1124 /**
1125 * nand_init_data_interface - find the best data interface and timings
1126 * @chip: The NAND chip
1127 *
1128 * Find the best data interface and NAND timings supported by the chip
1129 * and the driver.
1130 * First tries to retrieve supported timing modes from ONFI information,
1131 * and if the NAND chip does not support ONFI, relies on the
1132 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1133 * function nand_chip->data_interface is initialized with the best timing mode
1134 * available.
1135 *
1136 * Returns 0 for success or negative error code otherwise.
1137 */
1138 static int nand_init_data_interface(struct nand_chip *chip)
1139 {
1140 struct mtd_info *mtd = nand_to_mtd(chip);
1141 int modes, mode, ret;
1142
1143 if (!chip->setup_data_interface)
1144 return 0;
1145
1146 /*
1147 * First try to identify the best timings from ONFI parameters and
1148 * if the NAND does not support ONFI, fall back to the default ONFI
1149 * timing mode.
1150 */
1151 modes = onfi_get_async_timing_mode(chip);
1152 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1153 if (!chip->onfi_timing_mode_default)
1154 return 0;
1155
1156 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1157 }
1158
1159 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1160 GFP_KERNEL);
1161 if (!chip->data_interface)
1162 return -ENOMEM;
1163
1164 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1165 ret = onfi_init_data_interface(chip, chip->data_interface,
1166 NAND_SDR_IFACE, mode);
1167 if (ret)
1168 continue;
1169
1170 ret = chip->setup_data_interface(mtd, chip->data_interface,
1171 true);
1172 if (!ret) {
1173 chip->onfi_timing_mode_default = mode;
1174 break;
1175 }
1176 }
1177
1178 return 0;
1179 }
1180
1181 static void nand_release_data_interface(struct nand_chip *chip)
1182 {
1183 kfree(chip->data_interface);
1184 }
1185
1186 /**
1187 * nand_reset - Reset and initialize a NAND device
1188 * @chip: The NAND chip
1189 * @chipnr: Internal die id
1190 *
1191 * Returns 0 for success or negative error code otherwise
1192 */
1193 int nand_reset(struct nand_chip *chip, int chipnr)
1194 {
1195 struct mtd_info *mtd = nand_to_mtd(chip);
1196 int ret;
1197
1198 ret = nand_reset_data_interface(chip);
1199 if (ret)
1200 return ret;
1201
1202 /*
1203 * The CS line has to be released before we can apply the new NAND
1204 * interface settings, hence this weird ->select_chip() dance.
1205 */
1206 chip->select_chip(mtd, chipnr);
1207 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1208 chip->select_chip(mtd, -1);
1209
1210 chip->select_chip(mtd, chipnr);
1211 ret = nand_setup_data_interface(chip);
1212 chip->select_chip(mtd, -1);
1213 if (ret)
1214 return ret;
1215
1216 return 0;
1217 }
1218
1219 /**
1220 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1221 * @mtd: mtd info
1222 * @ofs: offset to start unlock from
1223 * @len: length to unlock
1224 * @invert:
1225 * - when = 0, unlock the range of blocks within the lower and
1226 * upper boundary address
1227 * - when = 1, unlock the range of blocks outside the boundaries
1228 * of the lower and upper boundary address
1229 *
1230 * Returns unlock status.
1231 */
1232 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1233 uint64_t len, int invert)
1234 {
1235 int ret = 0;
1236 int status, page;
1237 struct nand_chip *chip = mtd_to_nand(mtd);
1238
1239 /* Submit address of first page to unlock */
1240 page = ofs >> chip->page_shift;
1241 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1242
1243 /* Submit address of last page to unlock */
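/* invert (0 or 1) is OR'ed into the low bit of the page address to select the invert-area behaviour described above */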
1244 page = (ofs + len) >> chip->page_shift;
1245 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1246 (page | invert) & chip->pagemask);
1247
1248 /* Call wait ready function */
1249 status = chip->waitfunc(mtd, chip);
1250 /* See if device thinks it succeeded */
1251 if (status & NAND_STATUS_FAIL) {
1252 pr_debug("%s: error status = 0x%08x\n",
1253 __func__, status);
1254 ret = -EIO;
1255 }
1256
1257 return ret;
1258 }
1259
1260 /**
1261 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1262 * @mtd: mtd info
1263 * @ofs: offset to start unlock from
1264 * @len: length to unlock
1265 *
1266 * Returns unlock status.
1267 */
1268 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1269 {
1270 int ret = 0;
1271 int chipnr;
1272 struct nand_chip *chip = mtd_to_nand(mtd);
1273
1274 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1275 __func__, (unsigned long long)ofs, len);
1276
1277 if (check_offs_len(mtd, ofs, len))
1278 return -EINVAL;
1279
1280 /* Align to last block address if size addresses end of the device */
1281 if (ofs + len == mtd->size)
1282 len -= mtd->erasesize;
1283
1284 nand_get_device(mtd, FL_UNLOCKING);
1285
1286 /* Shift to get chip number */
1287 chipnr = ofs >> chip->chip_shift;
1288
1289 /*
1290 * Reset the chip.
1291 * If we want to check WP through READ STATUS (bit 7), we must reset the
1292 * chip first, because some operations (e.g. erasing or programming a
1293 * locked block) can also clear bit 7 of the status register.
1295 */
1296 nand_reset(chip, chipnr);
1297
1298 chip->select_chip(mtd, chipnr);
1299
1300 /* Check if it is write protected */
1301 if (nand_check_wp(mtd)) {
1302 pr_debug("%s: device is write protected!\n",
1303 __func__);
1304 ret = -EIO;
1305 goto out;
1306 }
1307
1308 ret = __nand_unlock(mtd, ofs, len, 0);
1309
1310 out:
1311 chip->select_chip(mtd, -1);
1312 nand_release_device(mtd);
1313
1314 return ret;
1315 }
1316 EXPORT_SYMBOL(nand_unlock);
1317
1318 /**
1319 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1320 * @mtd: mtd info
1321 * @ofs: offset to start lock from
1322 * @len: length to lock
1323 *
1324 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1325 * have this feature, but it only allows locking all blocks, not a specified
1326 * range of blocks. For now the 'lock' feature is implemented by making use
1327 * of 'unlock'.
1328 *
1329 * Returns lock status.
1330 */
1331 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1332 {
1333 int ret = 0;
1334 int chipnr, status, page;
1335 struct nand_chip *chip = mtd_to_nand(mtd);
1336
1337 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1338 __func__, (unsigned long long)ofs, len);
1339
1340 if (check_offs_len(mtd, ofs, len))
1341 return -EINVAL;
1342
1343 nand_get_device(mtd, FL_LOCKING);
1344
1345 /* Shift to get chip number */
1346 chipnr = ofs >> chip->chip_shift;
1347
1348 /*
1349 * Reset the chip.
1350 * If we want to check WP through READ STATUS (bit 7), we must reset the
1351 * chip first, because some operations (e.g. erasing or programming a
1352 * locked block) can also clear bit 7 of the status register.
1354 */
1355 nand_reset(chip, chipnr);
1356
1357 chip->select_chip(mtd, chipnr);
1358
1359 /* Check if it is write protected */
1360 if (nand_check_wp(mtd)) {
1361 pr_debug("%s: device is write protected!\n",
1362 __func__);
1363 status = MTD_ERASE_FAILED;
1364 ret = -EIO;
1365 goto out;
1366 }
1367
1368 /* Submit address of first page to lock */
1369 page = ofs >> chip->page_shift;
1370 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1371
1372 /* Call wait ready function */
1373 status = chip->waitfunc(mtd, chip);
1374 /* See if device thinks it succeeded */
1375 if (status & NAND_STATUS_FAIL) {
1376 pr_debug("%s: error status = 0x%08x\n",
1377 __func__, status);
1378 ret = -EIO;
1379 goto out;
1380 }
1381
1382 ret = __nand_unlock(mtd, ofs, len, 0x1);
1383
1384 out:
1385 chip->select_chip(mtd, -1);
1386 nand_release_device(mtd);
1387
1388 return ret;
1389 }
1390 EXPORT_SYMBOL(nand_lock);
1391
1392 /**
1393 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1394 * @buf: buffer to test
1395 * @len: buffer length
1396 * @bitflips_threshold: maximum number of bitflips
1397 *
1398 * Check if a buffer contains only 0xff, which means the underlying region
1399 * has been erased and is ready to be programmed.
1400 * The bitflips_threshold specifies the maximum number of bitflips before
1401 * considering the region as not erased.
1402 * Note: The logic of this function has been extracted from the memweight
1403 * implementation, except that nand_check_erased_buf exits before testing
1404 * the whole buffer if the number of bitflips exceeds the bitflips_threshold
1405 * value.
1406 *
1407 * Returns a positive number of bitflips less than or equal to
1408 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1409 * threshold.
1410 */
1411 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1412 {
1413 const unsigned char *bitmap = buf;
1414 int bitflips = 0;
1415 int weight;
1416
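/* Handle unaligned leading bytes one at a time until the pointer is long-aligned */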
1417 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1418 len--, bitmap++) {
1419 weight = hweight8(*bitmap);
1420 bitflips += BITS_PER_BYTE - weight;
1421 if (unlikely(bitflips > bitflips_threshold))
1422 return -EBADMSG;
1423 }
1424
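/* Then count the cleared bits (bitflips away from 0xff) one machine word at a time */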
1425 for (; len >= sizeof(long);
1426 len -= sizeof(long), bitmap += sizeof(long)) {
1427 weight = hweight_long(*((unsigned long *)bitmap));
1428 bitflips += BITS_PER_LONG - weight;
1429 if (unlikely(bitflips > bitflips_threshold))
1430 return -EBADMSG;
1431 }
1432
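/* Finally handle any remaining tail bytes */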
1433 for (; len > 0; len--, bitmap++) {
1434 weight = hweight8(*bitmap);
1435 bitflips += BITS_PER_BYTE - weight;
1436 if (unlikely(bitflips > bitflips_threshold))
1437 return -EBADMSG;
1438 }
1439
1440 return bitflips;
1441 }
1442
1443 /**
1444 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1445 * 0xff data
1446 * @data: data buffer to test
1447 * @datalen: data length
1448 * @ecc: ECC buffer
1449 * @ecclen: ECC length
1450 * @extraoob: extra OOB buffer
1451 * @extraooblen: extra OOB length
1452 * @bitflips_threshold: maximum number of bitflips
1453 *
1454 * Check if a data buffer and its associated ECC and OOB data contains only
1455 * 0xff pattern, which means the underlying region has been erased and is
1456 * ready to be programmed.
1457 * The bitflips_threshold specifies the maximum number of bitflips before
1458 * considering the region as not erased.
1459 *
1460 * Note:
1461 * 1/ ECC algorithms work on pre-defined block sizes which are usually
1462 * different from the NAND page size. When fixing bitflips, ECC engines will
1463 * report the number of errors per chunk, and the NAND core infrastructure
1464 * expects you to return the maximum number of bitflips for the whole page.
1465 * This is why you should always use this function on a single chunk and
1466 * not on the whole page. After checking each chunk you should update your
1467 * max_bitflips value accordingly.
1468 * 2/ When checking for bitflips in erased pages you should not only check
1469 * the payload data but also the associated ECC data, because a user might
1470 * have programmed almost all bits to 1 but left a few at 0. In this case,
1471 * the chunk should not be considered erased, and checking the ECC bytes
1472 * prevents that mistake.
1473 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1474 * data are protected by the ECC engine.
1475 * It could also be used if you support subpages and want to attach some
1476 * extra OOB data to an ECC chunk.
1477 *
1478 * Returns a positive number of bitflips less than or equal to
1479 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1480 * threshold. In case of success, the passed buffers are filled with 0xff.
1481 */
1482 int nand_check_erased_ecc_chunk(void *data, int datalen,
1483 void *ecc, int ecclen,
1484 void *extraoob, int extraooblen,
1485 int bitflips_threshold)
1486 {
1487 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1488
1489 data_bitflips = nand_check_erased_buf(data, datalen,
1490 bitflips_threshold);
1491 if (data_bitflips < 0)
1492 return data_bitflips;
1493
1494 bitflips_threshold -= data_bitflips;
1495
1496 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1497 if (ecc_bitflips < 0)
1498 return ecc_bitflips;
1499
1500 bitflips_threshold -= ecc_bitflips;
1501
1502 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1503 bitflips_threshold);
1504 if (extraoob_bitflips < 0)
1505 return extraoob_bitflips;
1506
1507 if (data_bitflips)
1508 memset(data, 0xff, datalen);
1509
1510 if (ecc_bitflips)
1511 memset(ecc, 0xff, ecclen);
1512
1513 if (extraoob_bitflips)
1514 memset(extraoob, 0xff, extraooblen);
1515
1516 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1517 }
1518 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
1519
1520 /**
1521 * nand_read_page_raw - [INTERN] read raw page data without ecc
1522 * @mtd: mtd info structure
1523 * @chip: nand chip info structure
1524 * @buf: buffer to store read data
1525 * @oob_required: caller requires OOB data read to chip->oob_poi
1526 * @page: page number to read
1527 *
1528 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1529 */
1530 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1531 uint8_t *buf, int oob_required, int page)
1532 {
1533 chip->read_buf(mtd, buf, mtd->writesize);
1534 if (oob_required)
1535 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1536 return 0;
1537 }
1538
1539 /**
1540 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1541 * @mtd: mtd info structure
1542 * @chip: nand chip info structure
1543 * @buf: buffer to store read data
1544 * @oob_required: caller requires OOB data read to chip->oob_poi
1545 * @page: page number to read
1546 *
1547 * We need a special oob layout and handling even when OOB isn't used.
1548 */
1549 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1550 struct nand_chip *chip, uint8_t *buf,
1551 int oob_required, int page)
1552 {
1553 int eccsize = chip->ecc.size;
1554 int eccbytes = chip->ecc.bytes;
1555 uint8_t *oob = chip->oob_poi;
1556 int steps, size;
1557
1558 for (steps = chip->ecc.steps; steps > 0; steps--) {
1559 chip->read_buf(mtd, buf, eccsize);
1560 buf += eccsize;
1561
1562 if (chip->ecc.prepad) {
1563 chip->read_buf(mtd, oob, chip->ecc.prepad);
1564 oob += chip->ecc.prepad;
1565 }
1566
1567 chip->read_buf(mtd, oob, eccbytes);
1568 oob += eccbytes;
1569
1570 if (chip->ecc.postpad) {
1571 chip->read_buf(mtd, oob, chip->ecc.postpad);
1572 oob += chip->ecc.postpad;
1573 }
1574 }
1575
1576 size = mtd->oobsize - (oob - chip->oob_poi);
1577 if (size)
1578 chip->read_buf(mtd, oob, size);
1579
1580 return 0;
1581 }
1582
1583 /**
1584 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1585 * @mtd: mtd info structure
1586 * @chip: nand chip info structure
1587 * @buf: buffer to store read data
1588 * @oob_required: caller requires OOB data read to chip->oob_poi
1589 * @page: page number to read
1590 */
1591 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1592 uint8_t *buf, int oob_required, int page)
1593 {
1594 int i, eccsize = chip->ecc.size, ret;
1595 int eccbytes = chip->ecc.bytes;
1596 int eccsteps = chip->ecc.steps;
1597 uint8_t *p = buf;
1598 uint8_t *ecc_calc = chip->buffers->ecccalc;
1599 uint8_t *ecc_code = chip->buffers->ecccode;
1600 unsigned int max_bitflips = 0;
1601
1602 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1603
1604 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1605 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1606
1607 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1608 chip->ecc.total);
1609 if (ret)
1610 return ret;
1611
1612 eccsteps = chip->ecc.steps;
1613 p = buf;
1614
1615 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1616 int stat;
1617
1618 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1619 if (stat < 0) {
1620 mtd->ecc_stats.failed++;
1621 } else {
1622 mtd->ecc_stats.corrected += stat;
1623 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1624 }
1625 }
1626 return max_bitflips;
1627 }
1628
1629 /**
1630 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1631 * @mtd: mtd info structure
1632 * @chip: nand chip info structure
1633 * @data_offs: offset of requested data within the page
1634 * @readlen: data length
1635 * @bufpoi: buffer to store read data
1636 * @page: page number to read
1637 */
1638 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1639 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1640 int page)
1641 {
1642 int start_step, end_step, num_steps, ret;
1643 uint8_t *p;
1644 int data_col_addr, i, gaps = 0;
1645 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1646 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1647 int index, section = 0;
1648 unsigned int max_bitflips = 0;
1649 struct mtd_oob_region oobregion = { };
1650
1651 /* Column address within the page, aligned to ECC size (256 bytes) */
1652 start_step = data_offs / chip->ecc.size;
1653 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1654 num_steps = end_step - start_step + 1;
1655 index = start_step * chip->ecc.bytes;
1656
1657 /* Data size aligned to ecc.size */
1658 datafrag_len = num_steps * chip->ecc.size;
1659 eccfrag_len = num_steps * chip->ecc.bytes;
1660
1661 data_col_addr = start_step * chip->ecc.size;
1662 /* If we are not reading page-aligned data */
1663 if (data_col_addr != 0)
1664 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1665
1666 p = bufpoi + data_col_addr;
1667 chip->read_buf(mtd, p, datafrag_len);
1668
1669 /* Calculate ECC */
1670 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1671 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1672
1673 /*
1674 * Performance is better if we fetch the ECC bytes at their real offsets.
1675 * Let's make sure that there are no gaps in the ECC positions.
1676 */
1677 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1678 if (ret)
1679 return ret;
1680
1681 if (oobregion.length < eccfrag_len)
1682 gaps = 1;
1683
1684 if (gaps) {
1685 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1686 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1687 } else {
1688 /*
1689 * Send the command to read the particular ECC bytes, taking care
1690 * of buswidth alignment in read_buf.
1691 */
1692 aligned_pos = oobregion.offset & ~(busw - 1);
1693 aligned_len = eccfrag_len;
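/* Grow the read length by one byte when the ECC region start or end is not aligned to the bus width */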
1694 if (oobregion.offset & (busw - 1))
1695 aligned_len++;
1696 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1697 (busw - 1))
1698 aligned_len++;
1699
1700 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1701 mtd->writesize + aligned_pos, -1);
1702 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1703 }
1704
1705 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1706 chip->oob_poi, index, eccfrag_len);
1707 if (ret)
1708 return ret;
1709
1710 p = bufpoi + data_col_addr;
1711 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1712 int stat;
1713
1714 stat = chip->ecc.correct(mtd, p,
1715 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1716 if (stat == -EBADMSG &&
1717 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1718 /* check for empty pages with bitflips */
1719 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1720 &chip->buffers->ecccode[i],
1721 chip->ecc.bytes,
1722 NULL, 0,
1723 chip->ecc.strength);
1724 }
1725
1726 if (stat < 0) {
1727 mtd->ecc_stats.failed++;
1728 } else {
1729 mtd->ecc_stats.corrected += stat;
1730 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1731 }
1732 }
1733 return max_bitflips;
1734 }
1735
1736 /**
1737 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1738 * @mtd: mtd info structure
1739 * @chip: nand chip info structure
1740 * @buf: buffer to store read data
1741 * @oob_required: caller requires OOB data read to chip->oob_poi
1742 * @page: page number to read
1743 *
1744 * Not for syndrome calculating ECC controllers which need a special oob layout.
1745 */
1746 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1747 uint8_t *buf, int oob_required, int page)
1748 {
1749 int i, eccsize = chip->ecc.size, ret;
1750 int eccbytes = chip->ecc.bytes;
1751 int eccsteps = chip->ecc.steps;
1752 uint8_t *p = buf;
1753 uint8_t *ecc_calc = chip->buffers->ecccalc;
1754 uint8_t *ecc_code = chip->buffers->ecccode;
1755 unsigned int max_bitflips = 0;
1756
1757 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1758 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1759 chip->read_buf(mtd, p, eccsize);
1760 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1761 }
1762 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1763
1764 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1765 chip->ecc.total);
1766 if (ret)
1767 return ret;
1768
1769 eccsteps = chip->ecc.steps;
1770 p = buf;
1771
1772 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1773 int stat;
1774
1775 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1776 if (stat == -EBADMSG &&
1777 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1778 /* check for empty pages with bitflips */
1779 stat = nand_check_erased_ecc_chunk(p, eccsize,
1780 &ecc_code[i], eccbytes,
1781 NULL, 0,
1782 chip->ecc.strength);
1783 }
1784
1785 if (stat < 0) {
1786 mtd->ecc_stats.failed++;
1787 } else {
1788 mtd->ecc_stats.corrected += stat;
1789 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1790 }
1791 }
1792 return max_bitflips;
1793 }
1794
1795 /**
1796 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1797 * @mtd: mtd info structure
1798 * @chip: nand chip info structure
1799 * @buf: buffer to store read data
1800 * @oob_required: caller requires OOB data read to chip->oob_poi
1801 * @page: page number to read
1802 *
1803 * Hardware ECC for large page chips, which requires the OOB to be read
1804 * first. For this ECC mode, the write_page method is re-used from ECC_HW.
1805 * These methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
1806 * support with multiple ECC steps, which follows the "infix ECC" scheme and
1807 * reads/writes ECC from the data area, overwriting the NAND manufacturer's
1808 * bad block markings.
1808 */
1809 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1810 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1811 {
1812 int i, eccsize = chip->ecc.size, ret;
1813 int eccbytes = chip->ecc.bytes;
1814 int eccsteps = chip->ecc.steps;
1815 uint8_t *p = buf;
1816 uint8_t *ecc_code = chip->buffers->ecccode;
1817 uint8_t *ecc_calc = chip->buffers->ecccalc;
1818 unsigned int max_bitflips = 0;
1819
1820 /* Read the OOB area first */
1821 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1822 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1823 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1824
1825 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1826 chip->ecc.total);
1827 if (ret)
1828 return ret;
1829
1830 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1831 int stat;
1832
1833 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1834 chip->read_buf(mtd, p, eccsize);
1835 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1836
1837 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1838 if (stat == -EBADMSG &&
1839 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1840 /* check for empty pages with bitflips */
1841 stat = nand_check_erased_ecc_chunk(p, eccsize,
1842 &ecc_code[i], eccbytes,
1843 NULL, 0,
1844 chip->ecc.strength);
1845 }
1846
1847 if (stat < 0) {
1848 mtd->ecc_stats.failed++;
1849 } else {
1850 mtd->ecc_stats.corrected += stat;
1851 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1852 }
1853 }
1854 return max_bitflips;
1855 }
1856
1857 /**
1858 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1859 * @mtd: mtd info structure
1860 * @chip: nand chip info structure
1861 * @buf: buffer to store read data
1862 * @oob_required: caller requires OOB data read to chip->oob_poi
1863 * @page: page number to read
1864 *
1865 * The hw generator calculates the error syndrome automatically. Therefore we
1866 * need a special oob layout and handling.
1867 */
1868 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1869 uint8_t *buf, int oob_required, int page)
1870 {
1871 int i, eccsize = chip->ecc.size;
1872 int eccbytes = chip->ecc.bytes;
1873 int eccsteps = chip->ecc.steps;
1874 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1875 uint8_t *p = buf;
1876 uint8_t *oob = chip->oob_poi;
1877 unsigned int max_bitflips = 0;
1878
1879 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1880 int stat;
1881
1882 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1883 chip->read_buf(mtd, p, eccsize);
1884
1885 if (chip->ecc.prepad) {
1886 chip->read_buf(mtd, oob, chip->ecc.prepad);
1887 oob += chip->ecc.prepad;
1888 }
1889
1890 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1891 chip->read_buf(mtd, oob, eccbytes);
1892 stat = chip->ecc.correct(mtd, p, oob, NULL);
1893
1894 oob += eccbytes;
1895
1896 if (chip->ecc.postpad) {
1897 chip->read_buf(mtd, oob, chip->ecc.postpad);
1898 oob += chip->ecc.postpad;
1899 }
1900
1901 if (stat == -EBADMSG &&
1902 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1903 /* check for empty pages with bitflips */
1904 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1905 oob - eccpadbytes,
1906 eccpadbytes,
1907 NULL, 0,
1908 chip->ecc.strength);
1909 }
1910
1911 if (stat < 0) {
1912 mtd->ecc_stats.failed++;
1913 } else {
1914 mtd->ecc_stats.corrected += stat;
1915 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1916 }
1917 }
1918
1919 /* Calculate remaining oob bytes */
1920 i = mtd->oobsize - (oob - chip->oob_poi);
1921 if (i)
1922 chip->read_buf(mtd, oob, i);
1923
1924 return max_bitflips;
1925 }
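
/*
 * Illustrative per-step layout read by nand_read_page_syndrome() above (a
 * sketch only; prepad/postpad may be zero depending on the controller):
 *
 *   | data (ecc.size) | prepad | ECC (ecc.bytes) | postpad |  ... next step
 *
 * The data, prepad, ECC and postpad of each step are read in order, and any
 * remaining OOB bytes are read once all steps are done.
 */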
1926
1927 /**
1928 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1929 * @mtd: mtd info structure
1930 * @oob: oob destination address
1931 * @ops: oob ops structure
1932 * @len: size of oob to transfer
1933 */
1934 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1935 struct mtd_oob_ops *ops, size_t len)
1936 {
1937 struct nand_chip *chip = mtd_to_nand(mtd);
1938 int ret;
1939
1940 switch (ops->mode) {
1941
1942 case MTD_OPS_PLACE_OOB:
1943 case MTD_OPS_RAW:
1944 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1945 return oob + len;
1946
1947 case MTD_OPS_AUTO_OOB:
1948 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1949 ops->ooboffs, len);
1950 BUG_ON(ret);
1951 return oob + len;
1952
1953 default:
1954 BUG();
1955 }
1956 return NULL;
1957 }
1958
1959 /**
1960 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1961 * @mtd: MTD device structure
1962 * @retry_mode: the retry mode to use
1963 *
1964 * Some vendors supply a special command to shift the Vt threshold, to be used
1965 * when there are too many bitflips in a page (i.e., ECC error). After setting
1966 * a new threshold, the host should retry reading the page.
1967 */
1968 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1969 {
1970 struct nand_chip *chip = mtd_to_nand(mtd);
1971
1972 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1973
1974 if (retry_mode >= chip->read_retries)
1975 return -EINVAL;
1976
1977 if (!chip->setup_read_retry)
1978 return -EOPNOTSUPP;
1979
1980 return chip->setup_read_retry(mtd, retry_mode);
1981 }
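
/*
 * Sketch of the retry flow driven from nand_do_read_ops() below (the number
 * of modes is vendor specific and comes from chip->read_retries):
 *
 *	read page -> ECC failure -> nand_setup_read_retry(mtd, ++retry_mode)
 *	          -> re-read the same page -> ... until the read succeeds or
 *	          all modes are exhausted, then reset back to mode 0.
 */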
1982
1983 /**
1984 * nand_do_read_ops - [INTERN] Read data with ECC
1985 * @mtd: MTD device structure
1986 * @from: offset to read from
1987 * @ops: oob ops structure
1988 *
1989 * Internal function. Called with chip held.
1990 */
1991 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1992 struct mtd_oob_ops *ops)
1993 {
1994 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1995 struct nand_chip *chip = mtd_to_nand(mtd);
1996 int ret = 0;
1997 uint32_t readlen = ops->len;
1998 uint32_t oobreadlen = ops->ooblen;
1999 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2000
2001 uint8_t *bufpoi, *oob, *buf;
2002 int use_bufpoi;
2003 unsigned int max_bitflips = 0;
2004 int retry_mode = 0;
2005 bool ecc_fail = false;
2006
2007 chipnr = (int)(from >> chip->chip_shift);
2008 chip->select_chip(mtd, chipnr);
2009
2010 realpage = (int)(from >> chip->page_shift);
2011 page = realpage & chip->pagemask;
2012
2013 col = (int)(from & (mtd->writesize - 1));
2014
2015 buf = ops->datbuf;
2016 oob = ops->oobbuf;
2017 oob_required = oob ? 1 : 0;
2018
2019 while (1) {
2020 unsigned int ecc_failures = mtd->ecc_stats.failed;
2021
2022 bytes = min(mtd->writesize - col, readlen);
2023 aligned = (bytes == mtd->writesize);
2024
2025 if (!aligned)
2026 use_bufpoi = 1;
2027 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2028 use_bufpoi = !virt_addr_valid(buf) ||
2029 !IS_ALIGNED((unsigned long)buf,
2030 chip->buf_align);
2031 else
2032 use_bufpoi = 0;
2033
2034 /* Is the current page in the buffer? */
2035 if (realpage != chip->pagebuf || oob) {
2036 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
2037
2038 if (use_bufpoi && aligned)
2039 pr_debug("%s: using read bounce buffer for buf@%p\n",
2040 __func__, buf);
2041
2042 read_retry:
2043 if (nand_standard_page_accessors(&chip->ecc))
2044 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
2045
2046 /*
2047 * Now read the page into the buffer. Absent an error,
2048 * the read methods return max bitflips per ecc step.
2049 */
2050 if (unlikely(ops->mode == MTD_OPS_RAW))
2051 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
2052 oob_required,
2053 page);
2054 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
2055 !oob)
2056 ret = chip->ecc.read_subpage(mtd, chip,
2057 col, bytes, bufpoi,
2058 page);
2059 else
2060 ret = chip->ecc.read_page(mtd, chip, bufpoi,
2061 oob_required, page);
2062 if (ret < 0) {
2063 if (use_bufpoi)
2064 /* Invalidate page cache */
2065 chip->pagebuf = -1;
2066 break;
2067 }
2068
2069 /* Transfer unaligned data */
2070 if (use_bufpoi) {
2071 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2072 !(mtd->ecc_stats.failed - ecc_failures) &&
2073 (ops->mode != MTD_OPS_RAW)) {
2074 chip->pagebuf = realpage;
2075 chip->pagebuf_bitflips = ret;
2076 } else {
2077 /* Invalidate page cache */
2078 chip->pagebuf = -1;
2079 }
2080 memcpy(buf, chip->buffers->databuf + col, bytes);
2081 }
2082
2083 if (unlikely(oob)) {
2084 int toread = min(oobreadlen, max_oobsize);
2085
2086 if (toread) {
2087 oob = nand_transfer_oob(mtd,
2088 oob, ops, toread);
2089 oobreadlen -= toread;
2090 }
2091 }
2092
2093 if (chip->options & NAND_NEED_READRDY) {
2094 /* Apply delay or wait for ready/busy pin */
2095 if (!chip->dev_ready)
2096 udelay(chip->chip_delay);
2097 else
2098 nand_wait_ready(mtd);
2099 }
2100
2101 if (mtd->ecc_stats.failed - ecc_failures) {
2102 if (retry_mode + 1 < chip->read_retries) {
2103 retry_mode++;
2104 ret = nand_setup_read_retry(mtd,
2105 retry_mode);
2106 if (ret < 0)
2107 break;
2108
2109 /* Reset failures; retry */
2110 mtd->ecc_stats.failed = ecc_failures;
2111 goto read_retry;
2112 } else {
2113 /* No more retry modes; real failure */
2114 ecc_fail = true;
2115 }
2116 }
2117
2118 buf += bytes;
2119 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2120 } else {
2121 memcpy(buf, chip->buffers->databuf + col, bytes);
2122 buf += bytes;
2123 max_bitflips = max_t(unsigned int, max_bitflips,
2124 chip->pagebuf_bitflips);
2125 }
2126
2127 readlen -= bytes;
2128
2129 /* Reset to retry mode 0 */
2130 if (retry_mode) {
2131 ret = nand_setup_read_retry(mtd, 0);
2132 if (ret < 0)
2133 break;
2134 retry_mode = 0;
2135 }
2136
2137 if (!readlen)
2138 break;
2139
2140 /* For subsequent reads align to page boundary */
2141 col = 0;
2142 /* Increment page address */
2143 realpage++;
2144
2145 page = realpage & chip->pagemask;
2146 /* Check, if we cross a chip boundary */
2147 if (!page) {
2148 chipnr++;
2149 chip->select_chip(mtd, -1);
2150 chip->select_chip(mtd, chipnr);
2151 }
2152 }
2153 chip->select_chip(mtd, -1);
2154
2155 ops->retlen = ops->len - (size_t) readlen;
2156 if (oob)
2157 ops->oobretlen = ops->ooblen - oobreadlen;
2158
2159 if (ret < 0)
2160 return ret;
2161
2162 if (ecc_fail)
2163 return -EBADMSG;
2164
2165 return max_bitflips;
2166 }
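
/*
 * Usage sketch (illustrative only, not part of this driver): a client that
 * wants both the data and the OOB of a page would typically go through
 * mtd_read_oob() with an ops structure along these lines:
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode   = MTD_OPS_AUTO_OOB;
 *	ops.datbuf = databuf;			// mtd->writesize bytes
 *	ops.len    = mtd->writesize;
 *	ops.oobbuf = oobbuf;			// up to mtd->oobavail bytes
 *	ops.ooblen = mtd->oobavail;
 *	ret = mtd_read_oob(mtd, offs, &ops);
 *
 * mtd_read_oob() ends up in nand_read_oob() below, which dispatches to
 * nand_do_read_ops() whenever ops.datbuf is set.
 */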
2167
2168 /**
2169 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2170 * @mtd: MTD device structure
2171 * @from: offset to read from
2172 * @len: number of bytes to read
2173 * @retlen: pointer to variable to store the number of read bytes
2174 * @buf: the databuffer to put data
2175 *
2176 * Get hold of the chip and call nand_do_read_ops().
2177 */
2178 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2179 size_t *retlen, uint8_t *buf)
2180 {
2181 struct mtd_oob_ops ops;
2182 int ret;
2183
2184 nand_get_device(mtd, FL_READING);
2185 memset(&ops, 0, sizeof(ops));
2186 ops.len = len;
2187 ops.datbuf = buf;
2188 ops.mode = MTD_OPS_PLACE_OOB;
2189 ret = nand_do_read_ops(mtd, from, &ops);
2190 *retlen = ops.retlen;
2191 nand_release_device(mtd);
2192 return ret;
2193 }
2194
2195 /**
2196 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2197 * @mtd: mtd info structure
2198 * @chip: nand chip info structure
2199 * @page: page number to read
2200 */
2201 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2202 {
2203 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2204 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2205 return 0;
2206 }
2207 EXPORT_SYMBOL(nand_read_oob_std);
2208
2209 /**
2210 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2211 * with syndromes
2212 * @mtd: mtd info structure
2213 * @chip: nand chip info structure
2214 * @page: page number to read
2215 */
2216 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2217 int page)
2218 {
2219 int length = mtd->oobsize;
2220 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2221 int eccsize = chip->ecc.size;
2222 uint8_t *bufpoi = chip->oob_poi;
2223 int i, toread, sndrnd = 0, pos;
2224
2225 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2226 for (i = 0; i < chip->ecc.steps; i++) {
2227 if (sndrnd) {
2228 pos = eccsize + i * (eccsize + chunk);
2229 if (mtd->writesize > 512)
2230 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2231 else
2232 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2233 } else
2234 sndrnd = 1;
2235 toread = min_t(int, length, chunk);
2236 chip->read_buf(mtd, bufpoi, toread);
2237 bufpoi += toread;
2238 length -= toread;
2239 }
2240 if (length > 0)
2241 chip->read_buf(mtd, bufpoi, length);
2242
2243 return 0;
2244 }
2245 EXPORT_SYMBOL(nand_read_oob_syndrome);
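
/*
 * Illustrative example of the syndrome OOB addressing above (made-up
 * geometry, not a specific chip): with ecc.size = 512, ecc.bytes = 8 and no
 * pre/postpad, chunk = 8, so the per-step OOB chunks sit at columns 512,
 * 1032, 1552, ... i.e. eccsize + i * (eccsize + chunk), which is the column
 * selected via NAND_CMD_RNDOUT (large page) or NAND_CMD_READ0 (small page).
 */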
2246
2247 /**
2248 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2249 * @mtd: mtd info structure
2250 * @chip: nand chip info structure
2251 * @page: page number to write
2252 */
2253 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2254 {
2255 int status = 0;
2256 const uint8_t *buf = chip->oob_poi;
2257 int length = mtd->oobsize;
2258
2259 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2260 chip->write_buf(mtd, buf, length);
2261 /* Send command to program the OOB data */
2262 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2263
2264 status = chip->waitfunc(mtd, chip);
2265
2266 return status & NAND_STATUS_FAIL ? -EIO : 0;
2267 }
2268 EXPORT_SYMBOL(nand_write_oob_std);
2269
2270 /**
2271 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2272 * with syndrome - only for large page flash
2273 * @mtd: mtd info structure
2274 * @chip: nand chip info structure
2275 * @page: page number to write
2276 */
2277 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2278 int page)
2279 {
2280 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2281 int eccsize = chip->ecc.size, length = mtd->oobsize;
2282 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2283 const uint8_t *bufpoi = chip->oob_poi;
2284
2285 /*
2286 * data-ecc-data-ecc ... ecc-oob
2287 * or
2288 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2289 */
2290 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2291 pos = steps * (eccsize + chunk);
2292 steps = 0;
2293 } else
2294 pos = eccsize;
2295
2296 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2297 for (i = 0; i < steps; i++) {
2298 if (sndcmd) {
2299 if (mtd->writesize <= 512) {
2300 uint32_t fill = 0xFFFFFFFF;
2301
2302 len = eccsize;
2303 while (len > 0) {
2304 int num = min_t(int, len, 4);
2305 chip->write_buf(mtd, (uint8_t *)&fill,
2306 num);
2307 len -= num;
2308 }
2309 } else {
2310 pos = eccsize + i * (eccsize + chunk);
2311 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2312 }
2313 } else
2314 sndcmd = 1;
2315 len = min_t(int, length, chunk);
2316 chip->write_buf(mtd, bufpoi, len);
2317 bufpoi += len;
2318 length -= len;
2319 }
2320 if (length > 0)
2321 chip->write_buf(mtd, bufpoi, length);
2322
2323 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2324 status = chip->waitfunc(mtd, chip);
2325
2326 return status & NAND_STATUS_FAIL ? -EIO : 0;
2327 }
2328 EXPORT_SYMBOL(nand_write_oob_syndrome);
2329
2330 /**
2331 * nand_do_read_oob - [INTERN] NAND read out-of-band
2332 * @mtd: MTD device structure
2333 * @from: offset to read from
2334 * @ops: oob operations description structure
2335 *
2336 * NAND read out-of-band data from the spare area.
2337 */
2338 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2339 struct mtd_oob_ops *ops)
2340 {
2341 int page, realpage, chipnr;
2342 struct nand_chip *chip = mtd_to_nand(mtd);
2343 struct mtd_ecc_stats stats;
2344 int readlen = ops->ooblen;
2345 int len;
2346 uint8_t *buf = ops->oobbuf;
2347 int ret = 0;
2348
2349 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2350 __func__, (unsigned long long)from, readlen);
2351
2352 stats = mtd->ecc_stats;
2353
2354 len = mtd_oobavail(mtd, ops);
2355
2356 if (unlikely(ops->ooboffs >= len)) {
2357 pr_debug("%s: attempt to start read outside oob\n",
2358 __func__);
2359 return -EINVAL;
2360 }
2361
2362 /* Do not allow reads past end of device */
2363 if (unlikely(from >= mtd->size ||
2364 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2365 (from >> chip->page_shift)) * len)) {
2366 pr_debug("%s: attempt to read beyond end of device\n",
2367 __func__);
2368 return -EINVAL;
2369 }
2370
2371 chipnr = (int)(from >> chip->chip_shift);
2372 chip->select_chip(mtd, chipnr);
2373
2374 /* Shift to get page */
2375 realpage = (int)(from >> chip->page_shift);
2376 page = realpage & chip->pagemask;
2377
2378 while (1) {
2379 if (ops->mode == MTD_OPS_RAW)
2380 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2381 else
2382 ret = chip->ecc.read_oob(mtd, chip, page);
2383
2384 if (ret < 0)
2385 break;
2386
2387 len = min(len, readlen);
2388 buf = nand_transfer_oob(mtd, buf, ops, len);
2389
2390 if (chip->options & NAND_NEED_READRDY) {
2391 /* Apply delay or wait for ready/busy pin */
2392 if (!chip->dev_ready)
2393 udelay(chip->chip_delay);
2394 else
2395 nand_wait_ready(mtd);
2396 }
2397
2398 readlen -= len;
2399 if (!readlen)
2400 break;
2401
2402 /* Increment page address */
2403 realpage++;
2404
2405 page = realpage & chip->pagemask;
2406 /* Check, if we cross a chip boundary */
2407 if (!page) {
2408 chipnr++;
2409 chip->select_chip(mtd, -1);
2410 chip->select_chip(mtd, chipnr);
2411 }
2412 }
2413 chip->select_chip(mtd, -1);
2414
2415 ops->oobretlen = ops->ooblen - readlen;
2416
2417 if (ret < 0)
2418 return ret;
2419
2420 if (mtd->ecc_stats.failed - stats.failed)
2421 return -EBADMSG;
2422
2423 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2424 }
2425
2426 /**
2427 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2428 * @mtd: MTD device structure
2429 * @from: offset to read from
2430 * @ops: oob operation description structure
2431 *
2432 * NAND read data and/or out-of-band data.
2433 */
2434 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2435 struct mtd_oob_ops *ops)
2436 {
2437 int ret;
2438
2439 ops->retlen = 0;
2440
2441 /* Do not allow reads past end of device */
2442 if (ops->datbuf && (from + ops->len) > mtd->size) {
2443 pr_debug("%s: attempt to read beyond end of device\n",
2444 __func__);
2445 return -EINVAL;
2446 }
2447
2448 if (ops->mode != MTD_OPS_PLACE_OOB &&
2449 ops->mode != MTD_OPS_AUTO_OOB &&
2450 ops->mode != MTD_OPS_RAW)
2451 return -ENOTSUPP;
2452
2453 nand_get_device(mtd, FL_READING);
2454
2455 if (!ops->datbuf)
2456 ret = nand_do_read_oob(mtd, from, ops);
2457 else
2458 ret = nand_do_read_ops(mtd, from, ops);
2459
2460 nand_release_device(mtd);
2461 return ret;
2462 }
2463
2464
2465 /**
2466 * nand_write_page_raw - [INTERN] raw page write function
2467 * @mtd: mtd info structure
2468 * @chip: nand chip info structure
2469 * @buf: data buffer
2470 * @oob_required: must write chip->oob_poi to OOB
2471 * @page: page number to write
2472 *
2473 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2474 */
2475 static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2476 const uint8_t *buf, int oob_required, int page)
2477 {
2478 chip->write_buf(mtd, buf, mtd->writesize);
2479 if (oob_required)
2480 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2481
2482 return 0;
2483 }
2484
2485 /**
2486 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2487 * @mtd: mtd info structure
2488 * @chip: nand chip info structure
2489 * @buf: data buffer
2490 * @oob_required: must write chip->oob_poi to OOB
2491 * @page: page number to write
2492 *
2493 * We need a special oob layout and handling even when ECC isn't checked.
2494 */
2495 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2496 struct nand_chip *chip,
2497 const uint8_t *buf, int oob_required,
2498 int page)
2499 {
2500 int eccsize = chip->ecc.size;
2501 int eccbytes = chip->ecc.bytes;
2502 uint8_t *oob = chip->oob_poi;
2503 int steps, size;
2504
2505 for (steps = chip->ecc.steps; steps > 0; steps--) {
2506 chip->write_buf(mtd, buf, eccsize);
2507 buf += eccsize;
2508
2509 if (chip->ecc.prepad) {
2510 chip->write_buf(mtd, oob, chip->ecc.prepad);
2511 oob += chip->ecc.prepad;
2512 }
2513
2514 chip->write_buf(mtd, oob, eccbytes);
2515 oob += eccbytes;
2516
2517 if (chip->ecc.postpad) {
2518 chip->write_buf(mtd, oob, chip->ecc.postpad);
2519 oob += chip->ecc.postpad;
2520 }
2521 }
2522
2523 size = mtd->oobsize - (oob - chip->oob_poi);
2524 if (size)
2525 chip->write_buf(mtd, oob, size);
2526
2527 return 0;
2528 }
2529 /**
2530 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2531 * @mtd: mtd info structure
2532 * @chip: nand chip info structure
2533 * @buf: data buffer
2534 * @oob_required: must write chip->oob_poi to OOB
2535 * @page: page number to write
2536 */
2537 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2538 const uint8_t *buf, int oob_required,
2539 int page)
2540 {
2541 int i, eccsize = chip->ecc.size, ret;
2542 int eccbytes = chip->ecc.bytes;
2543 int eccsteps = chip->ecc.steps;
2544 uint8_t *ecc_calc = chip->buffers->ecccalc;
2545 const uint8_t *p = buf;
2546
2547 /* Software ECC calculation */
2548 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2549 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2550
2551 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2552 chip->ecc.total);
2553 if (ret)
2554 return ret;
2555
2556 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2557 }
2558
2559 /**
2560 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2561 * @mtd: mtd info structure
2562 * @chip: nand chip info structure
2563 * @buf: data buffer
2564 * @oob_required: must write chip->oob_poi to OOB
2565 * @page: page number to write
2566 */
2567 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2568 const uint8_t *buf, int oob_required,
2569 int page)
2570 {
2571 int i, eccsize = chip->ecc.size, ret;
2572 int eccbytes = chip->ecc.bytes;
2573 int eccsteps = chip->ecc.steps;
2574 uint8_t *ecc_calc = chip->buffers->ecccalc;
2575 const uint8_t *p = buf;
2576
2577 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2578 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2579 chip->write_buf(mtd, p, eccsize);
2580 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2581 }
2582
2583 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2584 chip->ecc.total);
2585 if (ret)
2586 return ret;
2587
2588 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2589
2590 return 0;
2591 }
2592
2593
2594 /**
2595 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2596 * @mtd: mtd info structure
2597 * @chip: nand chip info structure
2598 * @offset: column address of subpage within the page
2599 * @data_len: data length
2600 * @buf: data buffer
2601 * @oob_required: must write chip->oob_poi to OOB
2602 * @page: page number to write
2603 */
2604 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2605 struct nand_chip *chip, uint32_t offset,
2606 uint32_t data_len, const uint8_t *buf,
2607 int oob_required, int page)
2608 {
2609 uint8_t *oob_buf = chip->oob_poi;
2610 uint8_t *ecc_calc = chip->buffers->ecccalc;
2611 int ecc_size = chip->ecc.size;
2612 int ecc_bytes = chip->ecc.bytes;
2613 int ecc_steps = chip->ecc.steps;
2614 uint32_t start_step = offset / ecc_size;
2615 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2616 int oob_bytes = mtd->oobsize / ecc_steps;
2617 int step, ret;
2618
2619 for (step = 0; step < ecc_steps; step++) {
2620 /* configure controller for WRITE access */
2621 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2622
2623 /* write data (untouched subpages already masked by 0xFF) */
2624 chip->write_buf(mtd, buf, ecc_size);
2625
2626 /* mask ECC of un-touched subpages by padding 0xFF */
2627 if ((step < start_step) || (step > end_step))
2628 memset(ecc_calc, 0xff, ecc_bytes);
2629 else
2630 chip->ecc.calculate(mtd, buf, ecc_calc);
2631
2632 /* mask OOB of un-touched subpages by padding 0xFF */
2633 /* if oob_required, preserve OOB metadata of written subpage */
2634 if (!oob_required || (step < start_step) || (step > end_step))
2635 memset(oob_buf, 0xff, oob_bytes);
2636
2637 buf += ecc_size;
2638 ecc_calc += ecc_bytes;
2639 oob_buf += oob_bytes;
2640 }
2641
2642 /* Copy the calculated ECC for the whole page to chip->oob_poi */
2643 /* This includes the 0xFF mask value for unwritten subpages */
2644 ecc_calc = chip->buffers->ecccalc;
2645 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2646 chip->ecc.total);
2647 if (ret)
2648 return ret;
2649
2650 /* write OOB buffer to NAND device */
2651 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2652
2653 return 0;
2654 }
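
/*
 * Worked example of the subpage masking above (illustrative numbers): on a
 * 2048-byte page with ecc.size = 512 there are 4 ECC steps. A 512-byte write
 * at offset 1024 gives start_step = end_step = 2, so steps 0, 1 and 3 get
 * 0xFF ECC and 0xFF OOB while only step 2 carries real ECC. Padding with
 * 0xFF leaves the untouched subpages unprogrammed on the flash.
 */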
2655
2656
2657 /**
2658 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2659 * @mtd: mtd info structure
2660 * @chip: nand chip info structure
2661 * @buf: data buffer
2662 * @oob_required: must write chip->oob_poi to OOB
2663 * @page: page number to write
2664 *
2665 * The hw generator calculates the error syndrome automatically. Therefore we
2666 * need a special oob layout and handling.
2667 */
2668 static int nand_write_page_syndrome(struct mtd_info *mtd,
2669 struct nand_chip *chip,
2670 const uint8_t *buf, int oob_required,
2671 int page)
2672 {
2673 int i, eccsize = chip->ecc.size;
2674 int eccbytes = chip->ecc.bytes;
2675 int eccsteps = chip->ecc.steps;
2676 const uint8_t *p = buf;
2677 uint8_t *oob = chip->oob_poi;
2678
2679 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2680
2681 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2682 chip->write_buf(mtd, p, eccsize);
2683
2684 if (chip->ecc.prepad) {
2685 chip->write_buf(mtd, oob, chip->ecc.prepad);
2686 oob += chip->ecc.prepad;
2687 }
2688
2689 chip->ecc.calculate(mtd, p, oob);
2690 chip->write_buf(mtd, oob, eccbytes);
2691 oob += eccbytes;
2692
2693 if (chip->ecc.postpad) {
2694 chip->write_buf(mtd, oob, chip->ecc.postpad);
2695 oob += chip->ecc.postpad;
2696 }
2697 }
2698
2699 /* Calculate remaining oob bytes */
2700 i = mtd->oobsize - (oob - chip->oob_poi);
2701 if (i)
2702 chip->write_buf(mtd, oob, i);
2703
2704 return 0;
2705 }
2706
2707 /**
2708 * nand_write_page - write one page
2709 * @mtd: MTD device structure
2710 * @chip: NAND chip descriptor
2711 * @offset: address offset within the page
2712 * @data_len: length of actual data to be written
2713 * @buf: the data to write
2714 * @oob_required: must write chip->oob_poi to OOB
2715 * @page: page number to write
2716 * @cached: cached programming
2717 * @raw: use _raw version of write_page
2718 */
2719 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2720 uint32_t offset, int data_len, const uint8_t *buf,
2721 int oob_required, int page, int cached, int raw)
2722 {
2723 int status, subpage;
2724
2725 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2726 chip->ecc.write_subpage)
2727 subpage = offset || (data_len < mtd->writesize);
2728 else
2729 subpage = 0;
2730
2731 if (nand_standard_page_accessors(&chip->ecc))
2732 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2733
2734 if (unlikely(raw))
2735 status = chip->ecc.write_page_raw(mtd, chip, buf,
2736 oob_required, page);
2737 else if (subpage)
2738 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2739 buf, oob_required, page);
2740 else
2741 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2742 page);
2743
2744 if (status < 0)
2745 return status;
2746
2747 /*
2748 * Cached programming is disabled for now. Not sure if it's worth the
2749 * trouble. The speed gain is not very impressive (2.3->2.6 MiB/s).
2750 */
2751 cached = 0;
2752
2753 if (!cached || !NAND_HAS_CACHEPROG(chip)) {
2754
2755 if (nand_standard_page_accessors(&chip->ecc))
2756 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2757 status = chip->waitfunc(mtd, chip);
2758 /*
2759 * See if operation failed and additional status checks are
2760 * available.
2761 */
2762 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2763 status = chip->errstat(mtd, chip, FL_WRITING, status,
2764 page);
2765
2766 if (status & NAND_STATUS_FAIL)
2767 return -EIO;
2768 } else {
2769 chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
2770 status = chip->waitfunc(mtd, chip);
2771 }
2772
2773 return 0;
2774 }
2775
2776 /**
2777 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2778 * @mtd: MTD device structure
2779 * @oob: oob data buffer
2780 * @len: oob data write length
2781 * @ops: oob ops structure
2782 */
2783 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2784 struct mtd_oob_ops *ops)
2785 {
2786 struct nand_chip *chip = mtd_to_nand(mtd);
2787 int ret;
2788
2789 /*
2790 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2791 * data from a previous OOB read.
2792 */
2793 memset(chip->oob_poi, 0xff, mtd->oobsize);
2794
2795 switch (ops->mode) {
2796
2797 case MTD_OPS_PLACE_OOB:
2798 case MTD_OPS_RAW:
2799 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2800 return oob + len;
2801
2802 case MTD_OPS_AUTO_OOB:
2803 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2804 ops->ooboffs, len);
2805 BUG_ON(ret);
2806 return oob + len;
2807
2808 default:
2809 BUG();
2810 }
2811 return NULL;
2812 }
2813
2814 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
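
/*
 * Example (assuming a 512-byte subpage size): NOTALIGNED(0x200) == 0 while
 * NOTALIGNED(0x3ff) != 0, so nand_do_write_ops() below rejects writes whose
 * offset or length is not a multiple of chip->subpagesize. When subpage
 * writes are not supported, chip->subpagesize equals mtd->writesize and this
 * degenerates into a full page alignment check.
 */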
2815
2816 /**
2817 * nand_do_write_ops - [INTERN] NAND write with ECC
2818 * @mtd: MTD device structure
2819 * @to: offset to write to
2820 * @ops: oob operations description structure
2821 *
2822 * NAND write with ECC.
2823 */
2824 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2825 struct mtd_oob_ops *ops)
2826 {
2827 int chipnr, realpage, page, blockmask, column;
2828 struct nand_chip *chip = mtd_to_nand(mtd);
2829 uint32_t writelen = ops->len;
2830
2831 uint32_t oobwritelen = ops->ooblen;
2832 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2833
2834 uint8_t *oob = ops->oobbuf;
2835 uint8_t *buf = ops->datbuf;
2836 int ret;
2837 int oob_required = oob ? 1 : 0;
2838
2839 ops->retlen = 0;
2840 if (!writelen)
2841 return 0;
2842
2843 /* Reject writes, which are not page aligned */
2844 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2845 pr_notice("%s: attempt to write non page aligned data\n",
2846 __func__);
2847 return -EINVAL;
2848 }
2849
2850 column = to & (mtd->writesize - 1);
2851
2852 chipnr = (int)(to >> chip->chip_shift);
2853 chip->select_chip(mtd, chipnr);
2854
2855 /* Check, if it is write protected */
2856 if (nand_check_wp(mtd)) {
2857 ret = -EIO;
2858 goto err_out;
2859 }
2860
2861 realpage = (int)(to >> chip->page_shift);
2862 page = realpage & chip->pagemask;
2863 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2864
2865 /* Invalidate the page cache, when we write to the cached page */
2866 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2867 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2868 chip->pagebuf = -1;
2869
2870 /* Don't allow multipage oob writes with offset */
2871 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2872 ret = -EINVAL;
2873 goto err_out;
2874 }
2875
2876 while (1) {
2877 int bytes = mtd->writesize;
2878 int cached = writelen > bytes && page != blockmask;
2879 uint8_t *wbuf = buf;
2880 int use_bufpoi;
2881 int part_pagewr = (column || writelen < mtd->writesize);
2882
2883 if (part_pagewr)
2884 use_bufpoi = 1;
2885 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2886 use_bufpoi = !virt_addr_valid(buf) ||
2887 !IS_ALIGNED((unsigned long)buf,
2888 chip->buf_align);
2889 else
2890 use_bufpoi = 0;
2891
2892 /* Partial page write, or do we need to use the bounce buffer? */
2893 if (use_bufpoi) {
2894 pr_debug("%s: using write bounce buffer for buf@%p\n",
2895 __func__, buf);
2896 cached = 0;
2897 if (part_pagewr)
2898 bytes = min_t(int, bytes - column, writelen);
2899 chip->pagebuf = -1;
2900 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2901 memcpy(&chip->buffers->databuf[column], buf, bytes);
2902 wbuf = chip->buffers->databuf;
2903 }
2904
2905 if (unlikely(oob)) {
2906 size_t len = min(oobwritelen, oobmaxlen);
2907 oob = nand_fill_oob(mtd, oob, len, ops);
2908 oobwritelen -= len;
2909 } else {
2910 /* We still need to erase leftover OOB data */
2911 memset(chip->oob_poi, 0xff, mtd->oobsize);
2912 }
2913
2914 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2915 oob_required, page, cached,
2916 (ops->mode == MTD_OPS_RAW));
2917 if (ret)
2918 break;
2919
2920 writelen -= bytes;
2921 if (!writelen)
2922 break;
2923
2924 column = 0;
2925 buf += bytes;
2926 realpage++;
2927
2928 page = realpage & chip->pagemask;
2929 /* Check, if we cross a chip boundary */
2930 if (!page) {
2931 chipnr++;
2932 chip->select_chip(mtd, -1);
2933 chip->select_chip(mtd, chipnr);
2934 }
2935 }
2936
2937 ops->retlen = ops->len - writelen;
2938 if (unlikely(oob))
2939 ops->oobretlen = ops->ooblen;
2940
2941 err_out:
2942 chip->select_chip(mtd, -1);
2943 return ret;
2944 }
2945
2946 /**
2947 * panic_nand_write - [MTD Interface] NAND write with ECC
2948 * @mtd: MTD device structure
2949 * @to: offset to write to
2950 * @len: number of bytes to write
2951 * @retlen: pointer to variable to store the number of written bytes
2952 * @buf: the data to write
2953 *
2954 * NAND write with ECC. Used when performing writes in interrupt context, this
2955 * may for example be called by mtdoops when writing an oops while in panic.
2956 */
2957 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2958 size_t *retlen, const uint8_t *buf)
2959 {
2960 struct nand_chip *chip = mtd_to_nand(mtd);
2961 struct mtd_oob_ops ops;
2962 int ret;
2963
2964 /* Wait for the device to get ready */
2965 panic_nand_wait(mtd, chip, 400);
2966
2967 /* Grab the device */
2968 panic_nand_get_device(chip, mtd, FL_WRITING);
2969
2970 memset(&ops, 0, sizeof(ops));
2971 ops.len = len;
2972 ops.datbuf = (uint8_t *)buf;
2973 ops.mode = MTD_OPS_PLACE_OOB;
2974
2975 ret = nand_do_write_ops(mtd, to, &ops);
2976
2977 *retlen = ops.retlen;
2978 return ret;
2979 }
2980
2981 /**
2982 * nand_write - [MTD Interface] NAND write with ECC
2983 * @mtd: MTD device structure
2984 * @to: offset to write to
2985 * @len: number of bytes to write
2986 * @retlen: pointer to variable to store the number of written bytes
2987 * @buf: the data to write
2988 *
2989 * NAND write with ECC.
2990 */
2991 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2992 size_t *retlen, const uint8_t *buf)
2993 {
2994 struct mtd_oob_ops ops;
2995 int ret;
2996
2997 nand_get_device(mtd, FL_WRITING);
2998 memset(&ops, 0, sizeof(ops));
2999 ops.len = len;
3000 ops.datbuf = (uint8_t *)buf;
3001 ops.mode = MTD_OPS_PLACE_OOB;
3002 ret = nand_do_write_ops(mtd, to, &ops);
3003 *retlen = ops.retlen;
3004 nand_release_device(mtd);
3005 return ret;
3006 }
3007
3008 /**
3009 * nand_do_write_oob - [INTERN] NAND write out-of-band
3010 * @mtd: MTD device structure
3011 * @to: offset to write to
3012 * @ops: oob operation description structure
3013 *
3014 * NAND write out-of-band.
3015 */
3016 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3017 struct mtd_oob_ops *ops)
3018 {
3019 int chipnr, page, status, len;
3020 struct nand_chip *chip = mtd_to_nand(mtd);
3021
3022 pr_debug("%s: to = 0x%08x, len = %i\n",
3023 __func__, (unsigned int)to, (int)ops->ooblen);
3024
3025 len = mtd_oobavail(mtd, ops);
3026
3027 /* Do not allow write past end of page */
3028 if ((ops->ooboffs + ops->ooblen) > len) {
3029 pr_debug("%s: attempt to write past end of page\n",
3030 __func__);
3031 return -EINVAL;
3032 }
3033
3034 if (unlikely(ops->ooboffs >= len)) {
3035 pr_debug("%s: attempt to start write outside oob\n",
3036 __func__);
3037 return -EINVAL;
3038 }
3039
3040 /* Do not allow write past end of device */
3041 if (unlikely(to >= mtd->size ||
3042 ops->ooboffs + ops->ooblen >
3043 ((mtd->size >> chip->page_shift) -
3044 (to >> chip->page_shift)) * len)) {
3045 pr_debug("%s: attempt to write beyond end of device\n",
3046 __func__);
3047 return -EINVAL;
3048 }
3049
3050 chipnr = (int)(to >> chip->chip_shift);
3051
3052 /*
3053 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
3054 * of my DiskOnChip 2000 test units) will clear the whole data page too
3055 * if we don't do this. I have no clue why, but I seem to have 'fixed'
3056 * it in the doc2000 driver in August 1999. dwmw2.
3057 */
3058 nand_reset(chip, chipnr);
3059
3060 chip->select_chip(mtd, chipnr);
3061
3062 /* Shift to get page */
3063 page = (int)(to >> chip->page_shift);
3064
3065 /* Check, if it is write protected */
3066 if (nand_check_wp(mtd)) {
3067 chip->select_chip(mtd, -1);
3068 return -EROFS;
3069 }
3070
3071 /* Invalidate the page cache, if we write to the cached page */
3072 if (page == chip->pagebuf)
3073 chip->pagebuf = -1;
3074
3075 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3076
3077 if (ops->mode == MTD_OPS_RAW)
3078 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3079 else
3080 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3081
3082 chip->select_chip(mtd, -1);
3083
3084 if (status)
3085 return status;
3086
3087 ops->oobretlen = ops->ooblen;
3088
3089 return 0;
3090 }
3091
3092 /**
3093 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3094 * @mtd: MTD device structure
3095 * @to: offset to write to
3096 * @ops: oob operation description structure
3097 */
3098 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3099 struct mtd_oob_ops *ops)
3100 {
3101 int ret = -ENOTSUPP;
3102
3103 ops->retlen = 0;
3104
3105 /* Do not allow writes past end of device */
3106 if (ops->datbuf && (to + ops->len) > mtd->size) {
3107 pr_debug("%s: attempt to write beyond end of device\n",
3108 __func__);
3109 return -EINVAL;
3110 }
3111
3112 nand_get_device(mtd, FL_WRITING);
3113
3114 switch (ops->mode) {
3115 case MTD_OPS_PLACE_OOB:
3116 case MTD_OPS_AUTO_OOB:
3117 case MTD_OPS_RAW:
3118 break;
3119
3120 default:
3121 goto out;
3122 }
3123
3124 if (!ops->datbuf)
3125 ret = nand_do_write_oob(mtd, to, ops);
3126 else
3127 ret = nand_do_write_ops(mtd, to, ops);
3128
3129 out:
3130 nand_release_device(mtd);
3131 return ret;
3132 }
3133
3134 /**
3135 * single_erase - [GENERIC] NAND standard block erase command function
3136 * @mtd: MTD device structure
3137 * @page: the page address of the block which will be erased
3138 *
3139 * Standard erase command for NAND chips. Returns NAND status.
3140 */
3141 static int single_erase(struct mtd_info *mtd, int page)
3142 {
3143 struct nand_chip *chip = mtd_to_nand(mtd);
3144 /* Send commands to erase a block */
3145 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3146 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3147
3148 return chip->waitfunc(mtd, chip);
3149 }
3150
3151 /**
3152 * nand_erase - [MTD Interface] erase block(s)
3153 * @mtd: MTD device structure
3154 * @instr: erase instruction
3155 *
3156 * Erase one or more blocks.
3157 */
3158 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3159 {
3160 return nand_erase_nand(mtd, instr, 0);
3161 }
3162
3163 /**
3164 * nand_erase_nand - [INTERN] erase block(s)
3165 * @mtd: MTD device structure
3166 * @instr: erase instruction
3167 * @allowbbt: allow erasing the bbt area
3168 *
3169 * Erase one or more blocks.
3170 */
3171 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3172 int allowbbt)
3173 {
3174 int page, status, pages_per_block, ret, chipnr;
3175 struct nand_chip *chip = mtd_to_nand(mtd);
3176 loff_t len;
3177
3178 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3179 __func__, (unsigned long long)instr->addr,
3180 (unsigned long long)instr->len);
3181
3182 if (check_offs_len(mtd, instr->addr, instr->len))
3183 return -EINVAL;
3184
3185 /* Grab the lock and see if the device is available */
3186 nand_get_device(mtd, FL_ERASING);
3187
3188 /* Shift to get first page */
3189 page = (int)(instr->addr >> chip->page_shift);
3190 chipnr = (int)(instr->addr >> chip->chip_shift);
3191
3192 /* Calculate pages in each block */
3193 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3194
3195 /* Select the NAND device */
3196 chip->select_chip(mtd, chipnr);
3197
3198 /* Check, if it is write protected */
3199 if (nand_check_wp(mtd)) {
3200 pr_debug("%s: device is write protected!\n",
3201 __func__);
3202 instr->state = MTD_ERASE_FAILED;
3203 goto erase_exit;
3204 }
3205
3206 /* Loop through the pages */
3207 len = instr->len;
3208
3209 instr->state = MTD_ERASING;
3210
3211 while (len) {
3212 /* Check if we have a bad block, we do not erase bad blocks! */
3213 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3214 chip->page_shift, allowbbt)) {
3215 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3216 __func__, page);
3217 instr->state = MTD_ERASE_FAILED;
3218 goto erase_exit;
3219 }
3220
3221 /*
3222 * Invalidate the page cache, if we erase the block which
3223 * contains the current cached page.
3224 */
3225 if (page <= chip->pagebuf && chip->pagebuf <
3226 (page + pages_per_block))
3227 chip->pagebuf = -1;
3228
3229 status = chip->erase(mtd, page & chip->pagemask);
3230
3231 /*
3232 * See if operation failed and additional status checks are
3233 * available
3234 */
3235 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
3236 status = chip->errstat(mtd, chip, FL_ERASING,
3237 status, page);
3238
3239 /* See if block erase succeeded */
3240 if (status & NAND_STATUS_FAIL) {
3241 pr_debug("%s: failed erase, page 0x%08x\n",
3242 __func__, page);
3243 instr->state = MTD_ERASE_FAILED;
3244 instr->fail_addr =
3245 ((loff_t)page << chip->page_shift);
3246 goto erase_exit;
3247 }
3248
3249 /* Increment page address and decrement length */
3250 len -= (1ULL << chip->phys_erase_shift);
3251 page += pages_per_block;
3252
3253 /* Check, if we cross a chip boundary */
3254 if (len && !(page & chip->pagemask)) {
3255 chipnr++;
3256 chip->select_chip(mtd, -1);
3257 chip->select_chip(mtd, chipnr);
3258 }
3259 }
3260 instr->state = MTD_ERASE_DONE;
3261
3262 erase_exit:
3263
3264 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3265
3266 /* Deselect and wake up anyone waiting on the device */
3267 chip->select_chip(mtd, -1);
3268 nand_release_device(mtd);
3269
3270 /* Invoke the erase callback, if any */
3271 if (!ret)
3272 mtd_erase_callback(instr);
3273
3274 /* Return more or less happy */
3275 return ret;
3276 }
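
/*
 * Usage sketch (illustrative only): an MTD client erases whole blocks by
 * filling a struct erase_info and calling mtd_erase(), which lands in
 * nand_erase() above:
 *
 *	struct erase_info ei = { };
 *
 *	ei.mtd  = mtd;
 *	ei.addr = block * mtd->erasesize;	// must be block aligned
 *	ei.len  = mtd->erasesize;
 *	ret = mtd_erase(mtd, &ei);
 *
 * If ei.callback is set, it is invoked via mtd_erase_callback() on success.
 */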
3277
3278 /**
3279 * nand_sync - [MTD Interface] sync
3280 * @mtd: MTD device structure
3281 *
3282 * Sync is actually a wait for chip ready function.
3283 */
3284 static void nand_sync(struct mtd_info *mtd)
3285 {
3286 pr_debug("%s: called\n", __func__);
3287
3288 /* Grab the lock and see if the device is available */
3289 nand_get_device(mtd, FL_SYNCING);
3290 /* Release it and go back */
3291 nand_release_device(mtd);
3292 }
3293
3294 /**
3295 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3296 * @mtd: MTD device structure
3297 * @offs: offset relative to mtd start
3298 */
3299 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3300 {
3301 struct nand_chip *chip = mtd_to_nand(mtd);
3302 int chipnr = (int)(offs >> chip->chip_shift);
3303 int ret;
3304
3305 /* Select the NAND device */
3306 nand_get_device(mtd, FL_READING);
3307 chip->select_chip(mtd, chipnr);
3308
3309 ret = nand_block_checkbad(mtd, offs, 0);
3310
3311 chip->select_chip(mtd, -1);
3312 nand_release_device(mtd);
3313
3314 return ret;
3315 }
3316
3317 /**
3318 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3319 * @mtd: MTD device structure
3320 * @ofs: offset relative to mtd start
3321 */
3322 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3323 {
3324 int ret;
3325
3326 ret = nand_block_isbad(mtd, ofs);
3327 if (ret) {
3328 /* If it was bad already, return success and do nothing */
3329 if (ret > 0)
3330 return 0;
3331 return ret;
3332 }
3333
3334 return nand_block_markbad_lowlevel(mtd, ofs);
3335 }
3336
3337 /**
3338 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3339 * @mtd: MTD device structure
3340 * @ofs: offset relative to mtd start
3341 * @len: length of mtd
3342 */
3343 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3344 {
3345 struct nand_chip *chip = mtd_to_nand(mtd);
3346 u32 part_start_block;
3347 u32 part_end_block;
3348 u32 part_start_die;
3349 u32 part_end_die;
3350
3351 /*
3352 * max_bb_per_die and blocks_per_die are used to determine
3353 * the maximum bad block count.
3354 */
3355 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3356 return -ENOTSUPP;
3357
3358 /* Get the start and end of the partition in erase blocks. */
3359 part_start_block = mtd_div_by_eb(ofs, mtd);
3360 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3361
3362 /* Get the start and end LUNs of the partition. */
3363 part_start_die = part_start_block / chip->blocks_per_die;
3364 part_end_die = part_end_block / chip->blocks_per_die;
3365
3366 /*
3367 * Look up the bad blocks per unit and multiply by the number of units
3368 * that the partition spans.
3369 */
3370 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3371 }
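
/*
 * Worked example (made-up geometry): with blocks_per_die = 1024 and
 * max_bb_per_die = 20, a partition covering erase blocks 1000..1100 starts
 * on die 0 and ends on die 1, so the function reports 20 * 2 = 40 as the
 * worst-case bad block count for that partition.
 */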
3372
3373 /**
3374 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
3375 * @mtd: MTD device structure
3376 * @chip: nand chip info structure
3377 * @addr: feature address.
3378 * @subfeature_param: the subfeature parameters, a four bytes array.
3379 */
3380 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3381 int addr, uint8_t *subfeature_param)
3382 {
3383 int status;
3384 int i;
3385
3386 if (!chip->onfi_version ||
3387 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3388 & ONFI_OPT_CMD_SET_GET_FEATURES))
3389 return -EINVAL;
3390
3391 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3392 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3393 chip->write_byte(mtd, subfeature_param[i]);
3394
3395 status = chip->waitfunc(mtd, chip);
3396 if (status & NAND_STATUS_FAIL)
3397 return -EIO;
3398 return 0;
3399 }
3400
3401 /**
3402 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
3403 * @mtd: MTD device structure
3404 * @chip: nand chip info structure
3405 * @addr: feature address.
3406 * @subfeature_param: the subfeature parameters, a four bytes array.
3407 */
3408 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3409 int addr, uint8_t *subfeature_param)
3410 {
3411 int i;
3412
3413 if (!chip->onfi_version ||
3414 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3415 & ONFI_OPT_CMD_SET_GET_FEATURES))
3416 return -EINVAL;
3417
3418 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3419 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3420 *subfeature_param++ = chip->read_byte(mtd);
3421 return 0;
3422 }
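
/*
 * Usage sketch (illustrative only): a controller driver that wants to switch
 * an ONFI chip to a faster timing mode could do something like the following,
 * where "mode" is the desired ONFI timing mode:
 *
 *	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
 *
 *	ret = chip->onfi_set_features(mtd, chip,
 *				      ONFI_FEATURE_ADDR_TIMING_MODE,
 *				      tmode_param);
 *
 * and then read the feature back with chip->onfi_get_features() at the same
 * feature address to confirm that the chip accepted the new mode.
 */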
3423
3424 /**
3425 * nand_suspend - [MTD Interface] Suspend the NAND flash
3426 * @mtd: MTD device structure
3427 */
3428 static int nand_suspend(struct mtd_info *mtd)
3429 {
3430 return nand_get_device(mtd, FL_PM_SUSPENDED);
3431 }
3432
3433 /**
3434 * nand_resume - [MTD Interface] Resume the NAND flash
3435 * @mtd: MTD device structure
3436 */
3437 static void nand_resume(struct mtd_info *mtd)
3438 {
3439 struct nand_chip *chip = mtd_to_nand(mtd);
3440
3441 if (chip->state == FL_PM_SUSPENDED)
3442 nand_release_device(mtd);
3443 else
3444 pr_err("%s called for a chip which is not in suspended state\n",
3445 __func__);
3446 }
3447
3448 /**
3449 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3450 * prevent further operations
3451 * @mtd: MTD device structure
3452 */
3453 static void nand_shutdown(struct mtd_info *mtd)
3454 {
3455 nand_get_device(mtd, FL_PM_SUSPENDED);
3456 }
3457
3458 /* Set default functions */
3459 static void nand_set_defaults(struct nand_chip *chip)
3460 {
3461 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3462
3463 /* check for proper chip_delay setup, set 20us if not */
3464 if (!chip->chip_delay)
3465 chip->chip_delay = 20;
3466
3467 /* check, if a user supplied command function given */
3468 if (chip->cmdfunc == NULL)
3469 chip->cmdfunc = nand_command;
3470
3471 /* check, if a user supplied wait function given */
3472 if (chip->waitfunc == NULL)
3473 chip->waitfunc = nand_wait;
3474
3475 if (!chip->select_chip)
3476 chip->select_chip = nand_select_chip;
3477
3478 /* set for ONFI nand */
3479 if (!chip->onfi_set_features)
3480 chip->onfi_set_features = nand_onfi_set_features;
3481 if (!chip->onfi_get_features)
3482 chip->onfi_get_features = nand_onfi_get_features;
3483
3484 /* If called twice, pointers that depend on busw may need to be reset */
3485 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3486 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3487 if (!chip->read_word)
3488 chip->read_word = nand_read_word;
3489 if (!chip->block_bad)
3490 chip->block_bad = nand_block_bad;
3491 if (!chip->block_markbad)
3492 chip->block_markbad = nand_default_block_markbad;
3493 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3494 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3495 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3496 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3497 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3498 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3499 if (!chip->scan_bbt)
3500 chip->scan_bbt = nand_default_bbt;
3501
3502 if (!chip->controller) {
3503 chip->controller = &chip->hwcontrol;
3504 nand_hw_control_init(chip->controller);
3505 }
3506
3507 if (!chip->buf_align)
3508 chip->buf_align = 1;
3509 }
3510
3511 /* Sanitize ONFI strings so we can safely print them */
3512 static void sanitize_string(uint8_t *s, size_t len)
3513 {
3514 ssize_t i;
3515
3516 /* Null terminate */
3517 s[len - 1] = 0;
3518
3519 /* Remove non printable chars */
3520 for (i = 0; i < len - 1; i++) {
3521 if (s[i] < ' ' || s[i] > 127)
3522 s[i] = '?';
3523 }
3524
3525 /* Remove trailing spaces */
3526 strim(s);
3527 }
3528
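/*
 * This is the CRC-16 used by the ONFI and JEDEC parameter pages: polynomial
 * 0x8005 (x^16 + x^15 + x^2 + 1), processed MSB first and seeded with
 * ONFI_CRC_BASE. The parameter page readers below feed it the first 254
 * (ONFI) or 510 (JEDEC) bytes and compare the result against the
 * little-endian CRC stored at the end of the page.
 */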
3529 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3530 {
3531 int i;
3532 while (len--) {
3533 crc ^= *p++ << 8;
3534 for (i = 0; i < 8; i++)
3535 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3536 }
3537
3538 return crc;
3539 }
3540
3541 /* Parse the Extended Parameter Page. */
3542 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3543 struct nand_onfi_params *p)
3544 {
3545 struct mtd_info *mtd = nand_to_mtd(chip);
3546 struct onfi_ext_param_page *ep;
3547 struct onfi_ext_section *s;
3548 struct onfi_ext_ecc_info *ecc;
3549 uint8_t *cursor;
3550 int ret = -EINVAL;
3551 int len;
3552 int i;
3553
3554 len = le16_to_cpu(p->ext_param_page_length) * 16;
3555 ep = kmalloc(len, GFP_KERNEL);
3556 if (!ep)
3557 return -ENOMEM;
3558
3559 /* Send our own NAND_CMD_PARAM. */
3560 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3561
3562 /* Use the Change Read Column command to skip the ONFI param pages. */
3563 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3564 sizeof(*p) * p->num_of_param_pages, -1);
3565
3566 /* Read out the Extended Parameter Page. */
3567 chip->read_buf(mtd, (uint8_t *)ep, len);
3568 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3569 != le16_to_cpu(ep->crc))) {
3570 pr_debug("fail in the CRC.\n");
3571 goto ext_out;
3572 }
3573
3574 /*
3575 * Check the signature.
3576 * Do not strictly follow the ONFI spec, maybe changed in future.
3577 */
3578 if (strncmp(ep->sig, "EPPS", 4)) {
3579 pr_debug("The signature is invalid.\n");
3580 goto ext_out;
3581 }
3582
3583 /* find the ECC section. */
3584 cursor = (uint8_t *)(ep + 1);
3585 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3586 s = ep->sections + i;
3587 if (s->type == ONFI_SECTION_TYPE_2)
3588 break;
3589 cursor += s->length * 16;
3590 }
3591 if (i == ONFI_EXT_SECTION_MAX) {
3592 pr_debug("We can not find the ECC section.\n");
3593 goto ext_out;
3594 }
3595
3596 /* get the info we want. */
3597 ecc = (struct onfi_ext_ecc_info *)cursor;
3598
3599 if (!ecc->codeword_size) {
3600 pr_debug("Invalid codeword size\n");
3601 goto ext_out;
3602 }
3603
3604 chip->ecc_strength_ds = ecc->ecc_bits;
3605 chip->ecc_step_ds = 1 << ecc->codeword_size;
3606 ret = 0;
3607
3608 ext_out:
3609 kfree(ep);
3610 return ret;
3611 }
3612
3613 /*
3614 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3615 */
3616 static int nand_flash_detect_onfi(struct nand_chip *chip)
3617 {
3618 struct mtd_info *mtd = nand_to_mtd(chip);
3619 struct nand_onfi_params *p = &chip->onfi_params;
3620 int i, j;
3621 int val;
3622
3623 /* Try ONFI for unknown chip or LP */
3624 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3625 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3626 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3627 return 0;
3628
3629 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3630 for (i = 0; i < 3; i++) {
3631 for (j = 0; j < sizeof(*p); j++)
3632 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3633 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3634 le16_to_cpu(p->crc)) {
3635 break;
3636 }
3637 }
3638
3639 if (i == 3) {
3640 pr_err("Could not find valid ONFI parameter page; aborting\n");
3641 return 0;
3642 }
3643
3644 /* Check version */
3645 val = le16_to_cpu(p->revision);
3646 if (val & (1 << 5))
3647 chip->onfi_version = 23;
3648 else if (val & (1 << 4))
3649 chip->onfi_version = 22;
3650 else if (val & (1 << 3))
3651 chip->onfi_version = 21;
3652 else if (val & (1 << 2))
3653 chip->onfi_version = 20;
3654 else if (val & (1 << 1))
3655 chip->onfi_version = 10;
3656
3657 if (!chip->onfi_version) {
3658 pr_info("unsupported ONFI version: %d\n", val);
3659 return 0;
3660 }
3661
3662 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3663 sanitize_string(p->model, sizeof(p->model));
3664 if (!mtd->name)
3665 mtd->name = p->model;
3666
3667 mtd->writesize = le32_to_cpu(p->byte_per_page);
3668
3669 /*
3670 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3671 * (don't ask me who thought of this...). MTD assumes that these
3672 * dimensions will be power-of-2, so just truncate the remaining area.
3673 */
3674 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3675 mtd->erasesize *= mtd->writesize;
3676
3677 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3678
3679 /* See erasesize comment */
3680 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3681 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3682 chip->bits_per_cell = p->bits_per_cell;
3683
3684 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3685 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3686
3687 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3688 chip->options |= NAND_BUSWIDTH_16;
3689
3690 if (p->ecc_bits != 0xff) {
3691 chip->ecc_strength_ds = p->ecc_bits;
3692 chip->ecc_step_ds = 512;
3693 } else if (chip->onfi_version >= 21 &&
3694 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3695
3696 /*
3697 * nand_flash_detect_ext_param_page() uses the Change Read
3698 * Column command, which may not be supported by the current
3699 * chip->cmdfunc. So try to update chip->cmdfunc now. We do
3700 * not replace a user-supplied command function.
3701 */
3702 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3703 chip->cmdfunc = nand_command_lp;
3704
3705 /* The Extended Parameter Page is supported since ONFI 2.1. */
3706 if (nand_flash_detect_ext_param_page(chip, p))
3707 pr_warn("Failed to detect ONFI extended param page\n");
3708 } else {
3709 pr_warn("Could not retrieve ONFI ECC requirements\n");
3710 }
3711
3712 return 1;
3713 }
3714
3715 /*
3716 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3717 */
3718 static int nand_flash_detect_jedec(struct nand_chip *chip)
3719 {
3720 struct mtd_info *mtd = nand_to_mtd(chip);
3721 struct nand_jedec_params *p = &chip->jedec_params;
3722 struct jedec_ecc_info *ecc;
3723 int val;
3724 int i, j;
3725
3726 /* Try JEDEC for unknown chip or LP */
3727 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3728 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3729 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3730 chip->read_byte(mtd) != 'C')
3731 return 0;
3732
3733 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3734 for (i = 0; i < 3; i++) {
3735 for (j = 0; j < sizeof(*p); j++)
3736 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3737
3738 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3739 le16_to_cpu(p->crc))
3740 break;
3741 }
3742
3743 if (i == 3) {
3744 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3745 return 0;
3746 }
3747
3748 /* Check version */
3749 val = le16_to_cpu(p->revision);
3750 if (val & (1 << 2))
3751 chip->jedec_version = 10;
3752 else if (val & (1 << 1))
3753 chip->jedec_version = 1; /* vendor specific version */
3754
3755 if (!chip->jedec_version) {
3756 pr_info("unsupported JEDEC version: %d\n", val);
3757 return 0;
3758 }
3759
3760 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3761 sanitize_string(p->model, sizeof(p->model));
3762 if (!mtd->name)
3763 mtd->name = p->model;
3764
3765 mtd->writesize = le32_to_cpu(p->byte_per_page);
3766
3767 /* See the corresponding comment in nand_flash_detect_onfi(). */
3768 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3769 mtd->erasesize *= mtd->writesize;
3770
3771 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3772
3773 /* See the corresponding comment in nand_flash_detect_onfi(). */
3774 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3775 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3776 chip->bits_per_cell = p->bits_per_cell;
3777
3778 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3779 chip->options |= NAND_BUSWIDTH_16;
3780
3781 /* ECC info */
3782 ecc = &p->ecc_info[0];
3783
3784 if (ecc->codeword_size >= 9) {
3785 chip->ecc_strength_ds = ecc->ecc_bits;
3786 chip->ecc_step_ds = 1 << ecc->codeword_size;
3787 } else {
3788 pr_warn("Invalid codeword size\n");
3789 }
3790
3791 return 1;
3792 }
3793
3794 /*
3795 * nand_id_has_period - Check if an ID string has a given wraparound period
3796 * @id_data: the ID string
3797 * @arrlen: the length of the @id_data array
3798 * @period: the period of repetition
3799 *
3800 * Check if an ID string is repeated within a given sequence of bytes at
3801 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
3802 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3803 * if the repetition has a period of @period; otherwise, returns zero.
3804 */
3805 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3806 {
3807 int i, j;
3808 for (i = 0; i < period; i++)
3809 for (j = i + period; j < arrlen; j += period)
3810 if (id_data[i] != id_data[j])
3811 return 0;
3812 return 1;
3813 }
3814
3815 /*
3816 * nand_id_len - Get the length of an ID string returned by CMD_READID
3817 * @id_data: the ID string
3818 * @arrlen: the length of the @id_data array
3819 *
3820 * Returns the length of the ID string, according to known wraparound/trailing
3821 * zero patterns. If no pattern exists, returns the length of the array.
3822 */
3823 static int nand_id_len(u8 *id_data, int arrlen)
3824 {
3825 int last_nonzero, period;
3826
3827 /* Find last non-zero byte */
3828 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3829 if (id_data[last_nonzero])
3830 break;
3831
3832 /* All zeros */
3833 if (last_nonzero < 0)
3834 return 0;
3835
3836 /* Calculate wraparound period */
3837 for (period = 1; period < arrlen; period++)
3838 if (nand_id_has_period(id_data, arrlen, period))
3839 break;
3840
3841 /* There's a repeated pattern */
3842 if (period < arrlen)
3843 return period;
3844
3845 /* There are trailing zeros */
3846 if (last_nonzero < arrlen - 1)
3847 return last_nonzero + 1;
3848
3849 /* No pattern detected */
3850 return arrlen;
3851 }
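/*
 * Worked example (hypothetical ID buffers, for illustration only):
 *  - { 0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51 } repeats with a
 *    period of 5, so nand_id_len() returns 5.
 *  - { 0x20, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } has trailing
 *    zeros after the second byte, so nand_id_len() returns 2.
 *  - { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 } shows no pattern,
 *    so nand_id_len() returns the array length, 8.
 */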
3852
3853 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
3854 static int nand_get_bits_per_cell(u8 cellinfo)
3855 {
3856 int bits;
3857
3858 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3859 bits >>= NAND_CI_CELLTYPE_SHIFT;
3860 return bits + 1;
3861 }
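/*
 * For illustration: the cell-type field extracted above is simply
 * incremented by one, so a field value of 0 decodes to 1 bit per cell
 * (SLC) and a field value of 1 decodes to 2 bits per cell (MLC).
 */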
3862
3863 /*
3864 * Many new NAND chips share similar device ID codes, which represent the size
3865 * of the chip. The rest of the parameters must be decoded according to generic or
3866 * manufacturer-specific "extended ID" decoding patterns.
3867 */
3868 void nand_decode_ext_id(struct nand_chip *chip)
3869 {
3870 struct mtd_info *mtd = nand_to_mtd(chip);
3871 int extid;
3872 u8 *id_data = chip->id.data;
3873 /* The 3rd id byte holds MLC / multichip data */
3874 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3875 /* The 4th id byte is the important one */
3876 extid = id_data[3];
3877
3878 /* Calc pagesize */
3879 mtd->writesize = 1024 << (extid & 0x03);
3880 extid >>= 2;
3881 /* Calc oobsize */
3882 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3883 extid >>= 2;
3884 /* Calc blocksize. Blocksize is in multiples of 64KiB */
3885 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3886 extid >>= 2;
3887 /* Get buswidth information */
3888 if (extid & 0x1)
3889 chip->options |= NAND_BUSWIDTH_16;
3890 }
3891 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
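/*
 * Worked example (hypothetical 4th ID byte, for illustration only): with
 * id_data[3] = 0x15 the decoding above yields
 *	writesize = 1024 << (0x15 & 0x03)	= 2048 bytes,
 *	oobsize   = (8 << 0x01) * (2048 >> 9)	= 64 bytes,
 *	erasesize = (64 * 1024) << 0x01		= 128 KiB,
 * and the remaining bit is 0, so an 8-bit bus width is assumed.
 */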
3892
3893 /*
3894 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3895 * decodes a matching ID table entry and assigns the MTD size parameters for
3896 * the chip.
3897 */
3898 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3899 {
3900 struct mtd_info *mtd = nand_to_mtd(chip);
3901
3902 mtd->erasesize = type->erasesize;
3903 mtd->writesize = type->pagesize;
3904 mtd->oobsize = mtd->writesize / 32;
3905
3906 /* All legacy ID NAND chips are small-page, SLC */
3907 chip->bits_per_cell = 1;
3908 }
3909
3910 /*
3911 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3912 * heuristic patterns using various detected parameters (e.g., manufacturer,
3913 * page size, cell-type information).
3914 */
3915 static void nand_decode_bbm_options(struct nand_chip *chip)
3916 {
3917 struct mtd_info *mtd = nand_to_mtd(chip);
3918
3919 /* Set the bad block position */
3920 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3921 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3922 else
3923 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3924 }
3925
3926 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3927 {
3928 return type->id_len;
3929 }
3930
3931 static bool find_full_id_nand(struct nand_chip *chip,
3932 struct nand_flash_dev *type)
3933 {
3934 struct mtd_info *mtd = nand_to_mtd(chip);
3935 u8 *id_data = chip->id.data;
3936
3937 if (!strncmp(type->id, id_data, type->id_len)) {
3938 mtd->writesize = type->pagesize;
3939 mtd->erasesize = type->erasesize;
3940 mtd->oobsize = type->oobsize;
3941
3942 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3943 chip->chipsize = (uint64_t)type->chipsize << 20;
3944 chip->options |= type->options;
3945 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3946 chip->ecc_step_ds = NAND_ECC_STEP(type);
3947 chip->onfi_timing_mode_default =
3948 type->onfi_timing_mode_default;
3949
3950 if (!mtd->name)
3951 mtd->name = type->name;
3952
3953 return true;
3954 }
3955 return false;
3956 }
3957
3958 /*
3959 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3960 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3961 * table.
3962 */
3963 static void nand_manufacturer_detect(struct nand_chip *chip)
3964 {
3965 /*
3966 * Try manufacturer detection if available and use
3967 * nand_decode_ext_id() otherwise.
3968 */
3969 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3970 chip->manufacturer.desc->ops->detect)
3971 chip->manufacturer.desc->ops->detect(chip);
3972 else
3973 nand_decode_ext_id(chip);
3974 }
3975
3976 /*
3977 * Manufacturer initialization. This function is called for all NANDs including
3978 * ONFI and JEDEC compliant ones.
3979 * Manufacturer drivers should put all their specific initialization code in
3980 * their ->init() hook.
3981 */
3982 static int nand_manufacturer_init(struct nand_chip *chip)
3983 {
3984 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3985 !chip->manufacturer.desc->ops->init)
3986 return 0;
3987
3988 return chip->manufacturer.desc->ops->init(chip);
3989 }
3990
3991 /*
3992 * Manufacturer cleanup. This function is called for all NANDs including
3993 * ONFI and JEDEC compliant ones.
3994 * Manufacturer drivers should put all their specific cleanup code in their
3995 * ->cleanup() hook.
3996 */
3997 static void nand_manufacturer_cleanup(struct nand_chip *chip)
3998 {
3999 /* Release manufacturer private data */
4000 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4001 chip->manufacturer.desc->ops->cleanup)
4002 chip->manufacturer.desc->ops->cleanup(chip);
4003 }
4004
4005 /*
4006 * Get the flash and manufacturer id and lookup if the type is supported.
4007 */
4008 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4009 {
4010 const struct nand_manufacturer *manufacturer;
4011 struct mtd_info *mtd = nand_to_mtd(chip);
4012 int busw;
4013 int i, ret;
4014 u8 *id_data = chip->id.data;
4015 u8 maf_id, dev_id;
4016
4017 /*
4018 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4019 * after power-up.
4020 */
4021 nand_reset(chip, 0);
4022
4023 /* Select the device */
4024 chip->select_chip(mtd, 0);
4025
4026 /* Send the command for reading device ID */
4027 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4028
4029 /* Read manufacturer and device IDs */
4030 maf_id = chip->read_byte(mtd);
4031 dev_id = chip->read_byte(mtd);
4032
4033 /*
4034 * Try again to make sure, as on some systems bus-hold or other
4035 * interface concerns can cause random data which looks like a
4036 * possibly credible NAND flash to appear. If the two results do
4037 * not match, ignore the device completely.
4038 */
4039
4040 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4041
4042 /* Read entire ID string */
4043 for (i = 0; i < 8; i++)
4044 id_data[i] = chip->read_byte(mtd);
4045
4046 if (id_data[0] != maf_id || id_data[1] != dev_id) {
4047 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4048 maf_id, dev_id, id_data[0], id_data[1]);
4049 return -ENODEV;
4050 }
4051
4052 chip->id.len = nand_id_len(id_data, 8);
4053
4054 /* Try to identify manufacturer */
4055 manufacturer = nand_get_manufacturer(maf_id);
4056 chip->manufacturer.desc = manufacturer;
4057
4058 if (!type)
4059 type = nand_flash_ids;
4060
4061 /*
4062 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4063 * override it.
4064 * This is required to make sure initial NAND bus width set by the
4065 * NAND controller driver is coherent with the real NAND bus width
4066 * (extracted by auto-detection code).
4067 */
4068 busw = chip->options & NAND_BUSWIDTH_16;
4069
4070 /*
4071 * The flag is only set (never cleared), so reset it to its default value
4072 * before starting auto-detection.
4073 */
4074 chip->options &= ~NAND_BUSWIDTH_16;
4075
4076 for (; type->name != NULL; type++) {
4077 if (is_full_id_nand(type)) {
4078 if (find_full_id_nand(chip, type))
4079 goto ident_done;
4080 } else if (dev_id == type->dev_id) {
4081 break;
4082 }
4083 }
4084
4085 chip->onfi_version = 0;
4086 if (!type->name || !type->pagesize) {
4087 /* Check if the chip is ONFI compliant */
4088 if (nand_flash_detect_onfi(chip))
4089 goto ident_done;
4090
4091 /* Check if the chip is JEDEC compliant */
4092 if (nand_flash_detect_jedec(chip))
4093 goto ident_done;
4094 }
4095
4096 if (!type->name)
4097 return -ENODEV;
4098
4099 if (!mtd->name)
4100 mtd->name = type->name;
4101
4102 chip->chipsize = (uint64_t)type->chipsize << 20;
4103
4104 if (!type->pagesize)
4105 nand_manufacturer_detect(chip);
4106 else
4107 nand_decode_id(chip, type);
4108
4109 /* Get chip options */
4110 chip->options |= type->options;
4111
4112 ident_done:
4113
4114 if (chip->options & NAND_BUSWIDTH_AUTO) {
4115 WARN_ON(busw & NAND_BUSWIDTH_16);
4116 nand_set_defaults(chip);
4117 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4118 /*
4119 * Check if the bus width is correct. Hardware drivers should set
4120 * up the chip correctly!
4121 */
4122 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4123 maf_id, dev_id);
4124 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4125 mtd->name);
4126 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4127 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4128 return -EINVAL;
4129 }
4130
4131 nand_decode_bbm_options(chip);
4132
4133 /* Calculate the address shift from the page size */
4134 chip->page_shift = ffs(mtd->writesize) - 1;
4135 /* Convert chipsize to number of pages per chip -1 */
4136 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4137
4138 chip->bbt_erase_shift = chip->phys_erase_shift =
4139 ffs(mtd->erasesize) - 1;
4140 if (chip->chipsize & 0xffffffff)
4141 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4142 else {
4143 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4144 chip->chip_shift += 32 - 1;
4145 }
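/*
 * For illustration: a hypothetical 256 MiB chip (chipsize = 0x10000000)
 * has its low 32 bits non-zero, so chip_shift = ffs(0x10000000) - 1 = 28.
 */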
4146
4147 chip->badblockbits = 8;
4148 chip->erase = single_erase;
4149
4150 /* Do not replace user supplied command function! */
4151 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4152 chip->cmdfunc = nand_command_lp;
4153
4154 ret = nand_manufacturer_init(chip);
4155 if (ret)
4156 return ret;
4157
4158 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4159 maf_id, dev_id);
4160
4161 if (chip->onfi_version)
4162 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4163 chip->onfi_params.model);
4164 else if (chip->jedec_version)
4165 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4166 chip->jedec_params.model);
4167 else
4168 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4169 type->name);
4170
4171 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4172 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4173 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4174 return 0;
4175 }
4176
4177 static const char * const nand_ecc_modes[] = {
4178 [NAND_ECC_NONE] = "none",
4179 [NAND_ECC_SOFT] = "soft",
4180 [NAND_ECC_HW] = "hw",
4181 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4182 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4183 };
4184
4185 static int of_get_nand_ecc_mode(struct device_node *np)
4186 {
4187 const char *pm;
4188 int err, i;
4189
4190 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4191 if (err < 0)
4192 return err;
4193
4194 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4195 if (!strcasecmp(pm, nand_ecc_modes[i]))
4196 return i;
4197
4198 /*
4199 * For backward compatibility we support a few obsolete values that no
4200 * longer have mappings into nand_ecc_modes_t (they were merged with
4201 * other enums).
4202 */
4203 if (!strcasecmp(pm, "soft_bch"))
4204 return NAND_ECC_SOFT;
4205
4206 return -ENODEV;
4207 }
4208
4209 static const char * const nand_ecc_algos[] = {
4210 [NAND_ECC_HAMMING] = "hamming",
4211 [NAND_ECC_BCH] = "bch",
4212 };
4213
4214 static int of_get_nand_ecc_algo(struct device_node *np)
4215 {
4216 const char *pm;
4217 int err, i;
4218
4219 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4220 if (!err) {
4221 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4222 if (!strcasecmp(pm, nand_ecc_algos[i]))
4223 return i;
4224 return -ENODEV;
4225 }
4226
4227 /*
4228 * For backward compatibility we also read "nand-ecc-mode", checking
4229 * for some obsolete values that used to specify the ECC algorithm.
4230 */
4231 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4232 if (err < 0)
4233 return err;
4234
4235 if (!strcasecmp(pm, "soft"))
4236 return NAND_ECC_HAMMING;
4237 else if (!strcasecmp(pm, "soft_bch"))
4238 return NAND_ECC_BCH;
4239
4240 return -ENODEV;
4241 }
4242
4243 static int of_get_nand_ecc_step_size(struct device_node *np)
4244 {
4245 int ret;
4246 u32 val;
4247
4248 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4249 return ret ? ret : val;
4250 }
4251
4252 static int of_get_nand_ecc_strength(struct device_node *np)
4253 {
4254 int ret;
4255 u32 val;
4256
4257 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4258 return ret ? ret : val;
4259 }
4260
4261 static int of_get_nand_bus_width(struct device_node *np)
4262 {
4263 u32 val;
4264
4265 if (of_property_read_u32(np, "nand-bus-width", &val))
4266 return 8;
4267
4268 switch (val) {
4269 case 8:
4270 case 16:
4271 return val;
4272 default:
4273 return -EIO;
4274 }
4275 }
4276
4277 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4278 {
4279 return of_property_read_bool(np, "nand-on-flash-bbt");
4280 }
4281
4282 static int nand_dt_init(struct nand_chip *chip)
4283 {
4284 struct device_node *dn = nand_get_flash_node(chip);
4285 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4286
4287 if (!dn)
4288 return 0;
4289
4290 if (of_get_nand_bus_width(dn) == 16)
4291 chip->options |= NAND_BUSWIDTH_16;
4292
4293 if (of_get_nand_on_flash_bbt(dn))
4294 chip->bbt_options |= NAND_BBT_USE_FLASH;
4295
4296 ecc_mode = of_get_nand_ecc_mode(dn);
4297 ecc_algo = of_get_nand_ecc_algo(dn);
4298 ecc_strength = of_get_nand_ecc_strength(dn);
4299 ecc_step = of_get_nand_ecc_step_size(dn);
4300
4301 if (ecc_mode >= 0)
4302 chip->ecc.mode = ecc_mode;
4303
4304 if (ecc_algo >= 0)
4305 chip->ecc.algo = ecc_algo;
4306
4307 if (ecc_strength >= 0)
4308 chip->ecc.strength = ecc_strength;
4309
4310 if (ecc_step > 0)
4311 chip->ecc.size = ecc_step;
4312
4313 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4314 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4315
4316 return 0;
4317 }
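/*
 * Illustrative device tree fragment exercising the properties parsed by
 * nand_dt_init() above. The node name, reg value and numbers are made up;
 * the property names are the ones read by the helpers in this file.
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */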
4318
4319 /**
4320 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4321 * @mtd: MTD device structure
4322 * @maxchips: number of chips to scan for
4323 * @table: alternative NAND ID table
4324 *
4325 * This is the first phase of the normal nand_scan() function. It reads the
4326 * flash ID and sets up MTD fields accordingly.
4327 *
4328 */
4329 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4330 struct nand_flash_dev *table)
4331 {
4332 int i, nand_maf_id, nand_dev_id;
4333 struct nand_chip *chip = mtd_to_nand(mtd);
4334 int ret;
4335
4336 ret = nand_dt_init(chip);
4337 if (ret)
4338 return ret;
4339
4340 if (!mtd->name && mtd->dev.parent)
4341 mtd->name = dev_name(mtd->dev.parent);
4342
4343 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4344 /*
4345 * The default functions assigned for select_chip() and
4346 * cmdfunc() both expect cmd_ctrl() to be populated,
4347 * so we need to check that that's the case.
4348 */
4349 pr_err("chip.cmd_ctrl() callback is not provided\n");
4350 return -EINVAL;
4351 }
4352 /* Set the default functions */
4353 nand_set_defaults(chip);
4354
4355 /* Read the flash type */
4356 ret = nand_detect(chip, table);
4357 if (ret) {
4358 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4359 pr_warn("No NAND device found\n");
4360 chip->select_chip(mtd, -1);
4361 return ret;
4362 }
4363
4364 /* Initialize the ->data_interface field. */
4365 ret = nand_init_data_interface(chip);
4366 if (ret)
4367 goto err_nand_init;
4368
4369 /*
4370 * Setup the data interface correctly on the chip and controller side.
4371 * This explicit call to nand_setup_data_interface() is only required
4372 * for the first die, because nand_reset() has been called before
4373 * ->data_interface and ->default_onfi_timing_mode were set.
4374 * For the other dies, nand_reset() will automatically switch to the
4375 * best mode for us.
4376 */
4377 ret = nand_setup_data_interface(chip);
4378 if (ret)
4379 goto err_nand_init;
4380
4381 nand_maf_id = chip->id.data[0];
4382 nand_dev_id = chip->id.data[1];
4383
4384 chip->select_chip(mtd, -1);
4385
4386 /* Check for a chip array */
4387 for (i = 1; i < maxchips; i++) {
4388 /* See comment in nand_detect() for reset */
4389 nand_reset(chip, i);
4390
4391 chip->select_chip(mtd, i);
4392 /* Send the command for reading device ID */
4393 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4394 /* Read manufacturer and device IDs */
4395 if (nand_maf_id != chip->read_byte(mtd) ||
4396 nand_dev_id != chip->read_byte(mtd)) {
4397 chip->select_chip(mtd, -1);
4398 break;
4399 }
4400 chip->select_chip(mtd, -1);
4401 }
4402 if (i > 1)
4403 pr_info("%d chips detected\n", i);
4404
4405 /* Store the number of chips and calc total size for mtd */
4406 chip->numchips = i;
4407 mtd->size = i * chip->chipsize;
4408
4409 return 0;
4410
4411 err_nand_init:
4412 /* Free manufacturer priv data. */
4413 nand_manufacturer_cleanup(chip);
4414
4415 return ret;
4416 }
4417 EXPORT_SYMBOL(nand_scan_ident);
4418
4419 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4420 {
4421 struct nand_chip *chip = mtd_to_nand(mtd);
4422 struct nand_ecc_ctrl *ecc = &chip->ecc;
4423
4424 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4425 return -EINVAL;
4426
4427 switch (ecc->algo) {
4428 case NAND_ECC_HAMMING:
4429 ecc->calculate = nand_calculate_ecc;
4430 ecc->correct = nand_correct_data;
4431 ecc->read_page = nand_read_page_swecc;
4432 ecc->read_subpage = nand_read_subpage;
4433 ecc->write_page = nand_write_page_swecc;
4434 ecc->read_page_raw = nand_read_page_raw;
4435 ecc->write_page_raw = nand_write_page_raw;
4436 ecc->read_oob = nand_read_oob_std;
4437 ecc->write_oob = nand_write_oob_std;
4438 if (!ecc->size)
4439 ecc->size = 256;
4440 ecc->bytes = 3;
4441 ecc->strength = 1;
4442 return 0;
4443 case NAND_ECC_BCH:
4444 if (!mtd_nand_has_bch()) {
4445 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4446 return -EINVAL;
4447 }
4448 ecc->calculate = nand_bch_calculate_ecc;
4449 ecc->correct = nand_bch_correct_data;
4450 ecc->read_page = nand_read_page_swecc;
4451 ecc->read_subpage = nand_read_subpage;
4452 ecc->write_page = nand_write_page_swecc;
4453 ecc->read_page_raw = nand_read_page_raw;
4454 ecc->write_page_raw = nand_write_page_raw;
4455 ecc->read_oob = nand_read_oob_std;
4456 ecc->write_oob = nand_write_oob_std;
4457
4458 /*
4459 * The board driver should supply ecc.size and ecc.strength
4460 * values to select how many bits are correctable.
4461 * Otherwise, default to 4 bits for large page devices.
4462 */
4463 if (!ecc->size && (mtd->oobsize >= 64)) {
4464 ecc->size = 512;
4465 ecc->strength = 4;
4466 }
4467
4468 /*
4469 * If no ECC placement scheme was provided, pick up the default
4470 * large page one.
4471 */
4472 if (!mtd->ooblayout) {
4473 /* handle large page devices only */
4474 if (mtd->oobsize < 64) {
4475 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4476 return -EINVAL;
4477 }
4478
4479 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4480
4481 }
4482
4483 /*
4484 * We can only maximize ECC config when the default layout is
4485 * used, otherwise we don't know how many bytes can really be
4486 * used.
4487 */
4488 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4489 ecc->options & NAND_ECC_MAXIMIZE) {
4490 int steps, bytes;
4491
4492 /* Always prefer 1k ECC steps over 512-byte ones */
4493 ecc->size = 1024;
4494 steps = mtd->writesize / ecc->size;
4495
4496 /* Reserve 2 bytes for the BBM */
4497 bytes = (mtd->oobsize - 2) / steps;
4498 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4499 }
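/*
 * Worked example (hypothetical geometry): with a 4096-byte page and a
 * 224-byte OOB area, steps = 4096 / 1024 = 4, bytes = (224 - 2) / 4 = 55,
 * and strength = 55 * 8 / fls(8 * 1024) = 440 / 14 = 31 bits per 1024
 * bytes.
 */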
4500
4501 /* See nand_bch_init() for details. */
4502 ecc->bytes = 0;
4503 ecc->priv = nand_bch_init(mtd);
4504 if (!ecc->priv) {
4505 WARN(1, "BCH ECC initialization failed!\n");
4506 return -EINVAL;
4507 }
4508 return 0;
4509 default:
4510 WARN(1, "Unsupported ECC algorithm!\n");
4511 return -EINVAL;
4512 }
4513 }
4514
4515 /*
4516 * Check if the chip configuration meets the datasheet requirements.
4517 *
4518 * If our configuration corrects A bits per B bytes and the minimum
4519 * required correction level is X bits per Y bytes, then we must ensure
4520 * both of the following are true:
4521 *
4522 * (1) A / B >= X / Y
4523 * (2) A >= X
4524 *
4525 * Requirement (1) ensures we can correct for the required bitflip density.
4526 * Requirement (2) ensures we can correct even when all bitflips are clumped
4527 * in the same sector.
4528 */
4529 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4530 {
4531 struct nand_chip *chip = mtd_to_nand(mtd);
4532 struct nand_ecc_ctrl *ecc = &chip->ecc;
4533 int corr, ds_corr;
4534
4535 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4536 /* Not enough information */
4537 return true;
4538
4539 /*
4540 * We get the number of corrected bits per page to compare
4541 * the correction density.
4542 */
4543 corr = (mtd->writesize * ecc->strength) / ecc->size;
4544 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4545
4546 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4547 }
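/*
 * Worked example (hypothetical values): for a 4096-byte page, a chip
 * requiring 4 bits per 512 bytes (ecc_strength_ds = 4, ecc_step_ds = 512)
 * and a controller correcting 8 bits per 1024 bytes (strength = 8,
 * size = 1024):
 *	corr    = 4096 * 8 / 1024 = 32
 *	ds_corr = 4096 * 4 / 512  = 32
 * so corr >= ds_corr and 8 >= 4, and the configuration is accepted.
 */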
4548
4549 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4550 {
4551 struct nand_ecc_ctrl *ecc = &chip->ecc;
4552
4553 if (nand_standard_page_accessors(ecc))
4554 return false;
4555
4556 /*
4557 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4558 * controller driver implements all the page accessors because
4559 * default helpers are not suitable when the core does not
4560 * send the READ0/PAGEPROG commands.
4561 */
4562 return (!ecc->read_page || !ecc->write_page ||
4563 !ecc->read_page_raw || !ecc->write_page_raw ||
4564 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4565 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4566 ecc->hwctl && ecc->calculate));
4567 }
4568
4569 /**
4570 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4571 * @mtd: MTD device structure
4572 *
4573 * This is the second phase of the normal nand_scan() function. It fills out
4574 * all the uninitialized function pointers with the defaults and scans for a
4575 * bad block table if appropriate.
4576 */
4577 int nand_scan_tail(struct mtd_info *mtd)
4578 {
4579 struct nand_chip *chip = mtd_to_nand(mtd);
4580 struct nand_ecc_ctrl *ecc = &chip->ecc;
4581 struct nand_buffers *nbuf = NULL;
4582 int ret;
4583
4584 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4585 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4586 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4587 ret = -EINVAL;
4588 goto err_ident;
4589 }
4590
4591 if (invalid_ecc_page_accessors(chip)) {
4592 pr_err("Invalid ECC page accessors setup\n");
4593 ret = -EINVAL;
4594 goto err_ident;
4595 }
4596
4597 if (!(chip->options & NAND_OWN_BUFFERS)) {
4598 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4599 if (!nbuf) {
4600 ret = -ENOMEM;
4601 goto err_ident;
4602 }
4603
4604 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4605 if (!nbuf->ecccalc) {
4606 ret = -ENOMEM;
4607 goto err_free;
4608 }
4609
4610 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4611 if (!nbuf->ecccode) {
4612 ret = -ENOMEM;
4613 goto err_free;
4614 }
4615
4616 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4617 GFP_KERNEL);
4618 if (!nbuf->databuf) {
4619 ret = -ENOMEM;
4620 goto err_free;
4621 }
4622
4623 chip->buffers = nbuf;
4624 } else {
4625 if (!chip->buffers) {
4626 ret = -ENOMEM;
4627 goto err_ident;
4628 }
4629 }
4630
4631 /* Set the internal oob buffer location, just after the page data */
4632 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4633
4634 /*
4635 * If no default placement scheme is given, select an appropriate one.
4636 */
4637 if (!mtd->ooblayout &&
4638 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4639 switch (mtd->oobsize) {
4640 case 8:
4641 case 16:
4642 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4643 break;
4644 case 64:
4645 case 128:
4646 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4647 break;
4648 default:
4649 WARN(1, "No oob scheme defined for oobsize %d\n",
4650 mtd->oobsize);
4651 ret = -EINVAL;
4652 goto err_free;
4653 }
4654 }
4655
4656 /*
4657 * Check the ECC mode; if 3 byte/512 byte hardware ECC is selected and
4658 * we have a 256 byte pagesize, fall back to software ECC.
4659 */
4660
4661 switch (ecc->mode) {
4662 case NAND_ECC_HW_OOB_FIRST:
4663 /* Similar to NAND_ECC_HW, but with a separate read_page handler */
4664 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4665 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4666 ret = -EINVAL;
4667 goto err_free;
4668 }
4669 if (!ecc->read_page)
4670 ecc->read_page = nand_read_page_hwecc_oob_first;
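		/* fall through to NAND_ECC_HW for the remaining defaults */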
4671
4672 case NAND_ECC_HW:
4673 /* Use standard hwecc read page function? */
4674 if (!ecc->read_page)
4675 ecc->read_page = nand_read_page_hwecc;
4676 if (!ecc->write_page)
4677 ecc->write_page = nand_write_page_hwecc;
4678 if (!ecc->read_page_raw)
4679 ecc->read_page_raw = nand_read_page_raw;
4680 if (!ecc->write_page_raw)
4681 ecc->write_page_raw = nand_write_page_raw;
4682 if (!ecc->read_oob)
4683 ecc->read_oob = nand_read_oob_std;
4684 if (!ecc->write_oob)
4685 ecc->write_oob = nand_write_oob_std;
4686 if (!ecc->read_subpage)
4687 ecc->read_subpage = nand_read_subpage;
4688 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4689 ecc->write_subpage = nand_write_subpage_hwecc;
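		/* fall through to NAND_ECC_HW_SYNDROME for the shared sanity checks */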
4690
4691 case NAND_ECC_HW_SYNDROME:
4692 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4693 (!ecc->read_page ||
4694 ecc->read_page == nand_read_page_hwecc ||
4695 !ecc->write_page ||
4696 ecc->write_page == nand_write_page_hwecc)) {
4697 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4698 ret = -EINVAL;
4699 goto err_free;
4700 }
4701 /* Use standard syndrome read/write page function? */
4702 if (!ecc->read_page)
4703 ecc->read_page = nand_read_page_syndrome;
4704 if (!ecc->write_page)
4705 ecc->write_page = nand_write_page_syndrome;
4706 if (!ecc->read_page_raw)
4707 ecc->read_page_raw = nand_read_page_raw_syndrome;
4708 if (!ecc->write_page_raw)
4709 ecc->write_page_raw = nand_write_page_raw_syndrome;
4710 if (!ecc->read_oob)
4711 ecc->read_oob = nand_read_oob_syndrome;
4712 if (!ecc->write_oob)
4713 ecc->write_oob = nand_write_oob_syndrome;
4714
4715 if (mtd->writesize >= ecc->size) {
4716 if (!ecc->strength) {
4717 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4718 ret = -EINVAL;
4719 goto err_free;
4720 }
4721 break;
4722 }
4723 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4724 ecc->size, mtd->writesize);
4725 ecc->mode = NAND_ECC_SOFT;
4726 ecc->algo = NAND_ECC_HAMMING;
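		/* fall through to NAND_ECC_SOFT to set up the software ECC ops */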
4727
4728 case NAND_ECC_SOFT:
4729 ret = nand_set_ecc_soft_ops(mtd);
4730 if (ret) {
4731 ret = -EINVAL;
4732 goto err_free;
4733 }
4734 break;
4735
4736 case NAND_ECC_NONE:
4737 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4738 ecc->read_page = nand_read_page_raw;
4739 ecc->write_page = nand_write_page_raw;
4740 ecc->read_oob = nand_read_oob_std;
4741 ecc->read_page_raw = nand_read_page_raw;
4742 ecc->write_page_raw = nand_write_page_raw;
4743 ecc->write_oob = nand_write_oob_std;
4744 ecc->size = mtd->writesize;
4745 ecc->bytes = 0;
4746 ecc->strength = 0;
4747 break;
4748
4749 default:
4750 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4751 ret = -EINVAL;
4752 goto err_free;
4753 }
4754
4755 /* For many systems, the standard OOB write also works for raw */
4756 if (!ecc->read_oob_raw)
4757 ecc->read_oob_raw = ecc->read_oob;
4758 if (!ecc->write_oob_raw)
4759 ecc->write_oob_raw = ecc->write_oob;
4760
4761 /* propagate ecc info to mtd_info */
4762 mtd->ecc_strength = ecc->strength;
4763 mtd->ecc_step_size = ecc->size;
4764
4765 /*
4766 * Set the number of read / write steps for one page depending on ECC
4767 * mode.
4768 */
4769 ecc->steps = mtd->writesize / ecc->size;
4770 if (ecc->steps * ecc->size != mtd->writesize) {
4771 WARN(1, "Invalid ECC parameters\n");
4772 ret = -EINVAL;
4773 goto err_free;
4774 }
4775 ecc->total = ecc->steps * ecc->bytes;
4776
4777 /*
4778 * The number of bytes available for a client to place data into
4779 * the out of band area.
4780 */
4781 ret = mtd_ooblayout_count_freebytes(mtd);
4782 if (ret < 0)
4783 ret = 0;
4784
4785 mtd->oobavail = ret;
4786
4787 /* ECC sanity check: warn if it's too weak */
4788 if (!nand_ecc_strength_good(mtd))
4789 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4790 mtd->name);
4791
4792 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4793 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4794 switch (ecc->steps) {
4795 case 2:
4796 mtd->subpage_sft = 1;
4797 break;
4798 case 4:
4799 case 8:
4800 case 16:
4801 mtd->subpage_sft = 2;
4802 break;
4803 }
4804 }
4805 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4806
4807 /* Initialize state */
4808 chip->state = FL_READY;
4809
4810 /* Invalidate the pagebuffer reference */
4811 chip->pagebuf = -1;
4812
4813 /* Large page NAND with SOFT_ECC should support subpage reads */
4814 switch (ecc->mode) {
4815 case NAND_ECC_SOFT:
4816 if (chip->page_shift > 9)
4817 chip->options |= NAND_SUBPAGE_READ;
4818 break;
4819
4820 default:
4821 break;
4822 }
4823
4824 /* Fill in remaining MTD driver data */
4825 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4826 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4827 MTD_CAP_NANDFLASH;
4828 mtd->_erase = nand_erase;
4829 mtd->_point = NULL;
4830 mtd->_unpoint = NULL;
4831 mtd->_read = nand_read;
4832 mtd->_write = nand_write;
4833 mtd->_panic_write = panic_nand_write;
4834 mtd->_read_oob = nand_read_oob;
4835 mtd->_write_oob = nand_write_oob;
4836 mtd->_sync = nand_sync;
4837 mtd->_lock = NULL;
4838 mtd->_unlock = NULL;
4839 mtd->_suspend = nand_suspend;
4840 mtd->_resume = nand_resume;
4841 mtd->_reboot = nand_shutdown;
4842 mtd->_block_isreserved = nand_block_isreserved;
4843 mtd->_block_isbad = nand_block_isbad;
4844 mtd->_block_markbad = nand_block_markbad;
4845 mtd->_max_bad_blocks = nand_max_bad_blocks;
4846 mtd->writebufsize = mtd->writesize;
4847
4848 /*
4849 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4850 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4851 * properly set.
4852 */
4853 if (!mtd->bitflip_threshold)
4854 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
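	/* e.g. ecc_strength = 4 gives DIV_ROUND_UP(4 * 3, 4) = 3 as the default */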
4855
4856 /* Check, if we should skip the bad block table scan */
4857 if (chip->options & NAND_SKIP_BBTSCAN)
4858 return 0;
4859
4860 /* Build bad block table */
4861 ret = chip->scan_bbt(mtd);
4862 if (ret)
4863 goto err_free;
4864 return 0;
4865
4866 err_free:
4867 if (nbuf) {
4868 kfree(nbuf->databuf);
4869 kfree(nbuf->ecccode);
4870 kfree(nbuf->ecccalc);
4871 kfree(nbuf);
4872 }
4873
4874 err_ident:
4875 /* Clean up nand_scan_ident(). */
4876
4877 /* Free manufacturer priv data. */
4878 nand_manufacturer_cleanup(chip);
4879
4880 return ret;
4881 }
4882 EXPORT_SYMBOL(nand_scan_tail);
4883
4884 /*
4885 * is_module_text_address() isn't exported, and it's mostly a pointless
4886 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4887 * to call us from in-kernel code if the core NAND support is modular.
4888 */
4889 #ifdef MODULE
4890 #define caller_is_module() (1)
4891 #else
4892 #define caller_is_module() \
4893 is_module_text_address((unsigned long)__builtin_return_address(0))
4894 #endif
4895
4896 /**
4897 * nand_scan - [NAND Interface] Scan for the NAND device
4898 * @mtd: MTD device structure
4899 * @maxchips: number of chips to scan for
4900 *
4901 * This fills out all the uninitialized function pointers with the defaults.
4902 * The flash ID is read and the mtd/chip structures are filled with the
4903 * appropriate values.
4904 */
4905 int nand_scan(struct mtd_info *mtd, int maxchips)
4906 {
4907 int ret;
4908
4909 ret = nand_scan_ident(mtd, maxchips, NULL);
4910 if (!ret)
4911 ret = nand_scan_tail(mtd);
4912 return ret;
4913 }
4914 EXPORT_SYMBOL(nand_scan);
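/*
 * Illustrative sketch (not part of this driver): minimal probe-time usage
 * of nand_scan()/nand_release() by a hypothetical controller driver. The
 * my_*() names and my_io_base are made up; only the nand_chip/mtd_info
 * fields and the nand_scan()/nand_release()/mtd_device_register() calls
 * are real interfaces used by such drivers.
 */
#if 0	/* documentation-only example */
static int my_nand_probe(struct platform_device *pdev)
{
	struct nand_chip *chip;
	struct mtd_info *mtd;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = &pdev->dev;
	nand_set_flash_node(chip, pdev->dev.of_node);

	/* Minimal low-level hooks; nand_scan() fills in the rest. */
	chip->IO_ADDR_R = my_io_base;		/* hypothetical iomem base */
	chip->IO_ADDR_W = my_io_base;
	chip->cmd_ctrl = my_cmd_ctrl;		/* hypothetical ALE/CLE hook */
	chip->chip_delay = 50;
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	/* Identify the chip and set up the remaining defaults. */
	ret = nand_scan(mtd, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_release(mtd);

	return ret;
}
#endif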
4915
4916 /**
4917 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
4918 * @chip: NAND chip object
4919 */
4920 void nand_cleanup(struct nand_chip *chip)
4921 {
4922 if (chip->ecc.mode == NAND_ECC_SOFT &&
4923 chip->ecc.algo == NAND_ECC_BCH)
4924 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4925
4926 nand_release_data_interface(chip);
4927
4928 /* Free bad block table memory */
4929 kfree(chip->bbt);
4930 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
4931 kfree(chip->buffers->databuf);
4932 kfree(chip->buffers->ecccode);
4933 kfree(chip->buffers->ecccalc);
4934 kfree(chip->buffers);
4935 }
4936
4937 /* Free bad block descriptor memory */
4938 if (chip->badblock_pattern && chip->badblock_pattern->options
4939 & NAND_BBT_DYNAMICSTRUCT)
4940 kfree(chip->badblock_pattern);
4941
4942 /* Free manufacturer priv data. */
4943 nand_manufacturer_cleanup(chip);
4944 }
4945 EXPORT_SYMBOL_GPL(nand_cleanup);
4946
4947 /**
4948 * nand_release - [NAND Interface] Unregister the MTD device and free resources
4949 * held by the NAND device
4950 * @mtd: MTD device structure
4951 */
4952 void nand_release(struct mtd_info *mtd)
4953 {
4954 mtd_device_unregister(mtd);
4955 nand_cleanup(mtd_to_nand(mtd));
4956 }
4957 EXPORT_SYMBOL_GPL(nand_release);
4958
4959 MODULE_LICENSE("GPL");
4960 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
4961 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
4962 MODULE_DESCRIPTION("Generic NAND flash driver code");