1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
52
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
55
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59 {
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
66 if (!section) {
67 oobregion->offset = 0;
68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
76 oobregion->offset = 6;
77 oobregion->length = ecc->total - 4;
78 }
79
80 return 0;
81 }
82
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 struct mtd_oob_region *oobregion)
85 {
86 if (section > 1)
87 return -ERANGE;
88
89 if (mtd->oobsize == 16) {
90 if (section)
91 return -ERANGE;
92
93 oobregion->length = 8;
94 oobregion->offset = 8;
95 } else {
96 oobregion->length = 2;
97 if (!section)
98 oobregion->offset = 3;
99 else
100 oobregion->offset = 6;
101 }
102
103 return 0;
104 }
105
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 .ecc = nand_ooblayout_ecc_sp,
108 .free = nand_ooblayout_free_sp,
109 };
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 struct mtd_oob_region *oobregion)
114 {
115 struct nand_chip *chip = mtd_to_nand(mtd);
116 struct nand_ecc_ctrl *ecc = &chip->ecc;
117
118 if (section)
119 return -ERANGE;
120
121 oobregion->length = ecc->total;
122 oobregion->offset = mtd->oobsize - oobregion->length;
123
124 return 0;
125 }
126
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 struct mtd_oob_region *oobregion)
129 {
130 struct nand_chip *chip = mtd_to_nand(mtd);
131 struct nand_ecc_ctrl *ecc = &chip->ecc;
132
133 if (section)
134 return -ERANGE;
135
136 oobregion->length = mtd->oobsize - ecc->total - 2;
137 oobregion->offset = 2;
138
139 return 0;
140 }
141
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 .ecc = nand_ooblayout_ecc_lp,
144 .free = nand_ooblayout_free_lp,
145 };
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
147
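/*
 * Illustrative sketch (not part of this file): a controller driver that keeps
 * the default small/large page OOB placement simply attaches one of the
 * layouts above; the core does this itself during nand_scan_tail() when no
 * layout was set.  mtd_ooblayout_free()/mtd_ooblayout_ecc() can then be used
 * to query the resulting regions.
 */
static void __maybe_unused example_use_default_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;

	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

	/* First free OOB region of the large page layout starts at offset 2 */
	if (!mtd_ooblayout_free(mtd, 0, &region))
		pr_debug("free OOB bytes start at offset %d\n", region.offset);
}
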
148 /*
149 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
150 * are placed at a fixed offset.
151 */
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 struct mtd_oob_region *oobregion)
154 {
155 struct nand_chip *chip = mtd_to_nand(mtd);
156 struct nand_ecc_ctrl *ecc = &chip->ecc;
157
158 if (section)
159 return -ERANGE;
160
161 switch (mtd->oobsize) {
162 case 64:
163 oobregion->offset = 40;
164 break;
165 case 128:
166 oobregion->offset = 80;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 oobregion->length = ecc->total;
173 if (oobregion->offset + oobregion->length > mtd->oobsize)
174 return -ERANGE;
175
176 return 0;
177 }
178
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 struct mtd_oob_region *oobregion)
181 {
182 struct nand_chip *chip = mtd_to_nand(mtd);
183 struct nand_ecc_ctrl *ecc = &chip->ecc;
184 int ecc_offset = 0;
185
186 if (section < 0 || section > 1)
187 return -ERANGE;
188
189 switch (mtd->oobsize) {
190 case 64:
191 ecc_offset = 40;
192 break;
193 case 128:
194 ecc_offset = 80;
195 break;
196 default:
197 return -EINVAL;
198 }
199
200 if (section == 0) {
201 oobregion->offset = 2;
202 oobregion->length = ecc_offset - 2;
203 } else {
204 oobregion->offset = ecc_offset + ecc->total;
205 oobregion->length = mtd->oobsize - oobregion->offset;
206 }
207
208 return 0;
209 }
210
211 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 .ecc = nand_ooblayout_ecc_lp_hamming,
213 .free = nand_ooblayout_free_lp_hamming,
214 };
215
216 static int check_offs_len(struct mtd_info *mtd,
217 loff_t ofs, uint64_t len)
218 {
219 struct nand_chip *chip = mtd_to_nand(mtd);
220 int ret = 0;
221
222 /* Start address must align on block boundary */
223 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: unaligned address\n", __func__);
225 ret = -EINVAL;
226 }
227
228 /* Length must align on block boundary */
229 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 pr_debug("%s: length not block aligned\n", __func__);
231 ret = -EINVAL;
232 }
233
234 return ret;
235 }
236
237 /**
238 * nand_release_device - [GENERIC] release chip
239 * @mtd: MTD device structure
240 *
241 * Release chip lock and wake up anyone waiting on the device.
242 */
243 static void nand_release_device(struct mtd_info *mtd)
244 {
245 struct nand_chip *chip = mtd_to_nand(mtd);
246
247 /* Release the controller and the chip */
248 spin_lock(&chip->controller->lock);
249 chip->controller->active = NULL;
250 chip->state = FL_READY;
251 wake_up(&chip->controller->wq);
252 spin_unlock(&chip->controller->lock);
253 }
254
255 /**
256 * nand_read_byte - [DEFAULT] read one byte from the chip
257 * @mtd: MTD device structure
258 *
259 * Default read function for 8bit buswidth
260 */
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
262 {
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 return readb(chip->IO_ADDR_R);
265 }
266
267 /**
268 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
269 * @mtd: MTD device structure
270 *
271 * Default read function for 16bit buswidth with endianness conversion.
272 *
273 */
274 static uint8_t nand_read_byte16(struct mtd_info *mtd)
275 {
276 struct nand_chip *chip = mtd_to_nand(mtd);
277 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
278 }
279
280 /**
281 * nand_read_word - [DEFAULT] read one word from the chip
282 * @mtd: MTD device structure
283 *
284 * Default read function for 16bit buswidth without endianness conversion.
285 */
286 static u16 nand_read_word(struct mtd_info *mtd)
287 {
288 struct nand_chip *chip = mtd_to_nand(mtd);
289 return readw(chip->IO_ADDR_R);
290 }
291
292 /**
293 * nand_select_chip - [DEFAULT] control CE line
294 * @mtd: MTD device structure
295 * @chipnr: chipnumber to select, -1 for deselect
296 *
297 * Default select function for 1 chip devices.
298 */
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300 {
301 struct nand_chip *chip = mtd_to_nand(mtd);
302
303 switch (chipnr) {
304 case -1:
305 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 break;
307 case 0:
308 break;
309
310 default:
311 BUG();
312 }
313 }
314
315 /**
316 * nand_write_byte - [DEFAULT] write single byte to chip
317 * @mtd: MTD device structure
318 * @byte: value to write
319 *
320 * Default function to write a byte to I/O[7:0]
321 */
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323 {
324 struct nand_chip *chip = mtd_to_nand(mtd);
325
326 chip->write_buf(mtd, &byte, 1);
327 }
328
329 /**
330 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331 * @mtd: MTD device structure
332 * @byte: value to write
333 *
334 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
335 */
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337 {
338 struct nand_chip *chip = mtd_to_nand(mtd);
339 uint16_t word = byte;
340
341 /*
342 * It's not entirely clear what should happen to I/O[15:8] when writing
343 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
344 *
345 * When the host supports a 16-bit bus width, only data is
346 * transferred at the 16-bit width. All address and command line
347 * transfers shall use only the lower 8-bits of the data bus. During
348 * command transfers, the host may place any value on the upper
349 * 8-bits of the data bus. During address transfers, the host shall
350 * set the upper 8-bits of the data bus to 00h.
351 *
352 * One user of the write_byte callback is nand_onfi_set_features. The
353 * four parameters are specified to be written to I/O[7:0], but this is
354 * neither an address nor a command transfer. Let's assume a 0 on the
355 * upper I/O lines is OK.
356 */
357 chip->write_buf(mtd, (uint8_t *)&word, 2);
358 }
359
360 /**
361 * nand_write_buf - [DEFAULT] write buffer to chip
362 * @mtd: MTD device structure
363 * @buf: data buffer
364 * @len: number of bytes to write
365 *
366 * Default write function for 8bit buswidth.
367 */
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369 {
370 struct nand_chip *chip = mtd_to_nand(mtd);
371
372 iowrite8_rep(chip->IO_ADDR_W, buf, len);
373 }
374
375 /**
376 * nand_read_buf - [DEFAULT] read chip data into buffer
377 * @mtd: MTD device structure
378  * @buf: buffer to store data
379 * @len: number of bytes to read
380 *
381 * Default read function for 8bit buswidth.
382 */
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384 {
385 struct nand_chip *chip = mtd_to_nand(mtd);
386
387 ioread8_rep(chip->IO_ADDR_R, buf, len);
388 }
389
390 /**
391 * nand_write_buf16 - [DEFAULT] write buffer to chip
392 * @mtd: MTD device structure
393 * @buf: data buffer
394 * @len: number of bytes to write
395 *
396 * Default write function for 16bit buswidth.
397 */
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399 {
400 struct nand_chip *chip = mtd_to_nand(mtd);
401 u16 *p = (u16 *) buf;
402
403 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404 }
405
406 /**
407 * nand_read_buf16 - [DEFAULT] read chip data into buffer
408 * @mtd: MTD device structure
409  * @buf: buffer to store data
410 * @len: number of bytes to read
411 *
412 * Default read function for 16bit buswidth.
413 */
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415 {
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 u16 *p = (u16 *) buf;
418
419 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420 }
421
422 /**
423 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
424 * @mtd: MTD device structure
425 * @ofs: offset from device start
426 *
427 * Check, if the block is bad.
428 */
429 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
430 {
431 int page, page_end, res;
432 struct nand_chip *chip = mtd_to_nand(mtd);
433 u8 bad;
434
435 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 ofs += mtd->erasesize - mtd->writesize;
437
438 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
440
441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res)
444 return res;
445
446 bad = chip->oob_poi[chip->badblockpos];
447
448 if (likely(chip->badblockbits == 8))
449 res = bad != 0xFF;
450 else
451 res = hweight8(bad) < chip->badblockbits;
452 if (res)
453 return res;
454 }
455
456 return 0;
457 }
458
459 /**
460 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
461 * @mtd: MTD device structure
462 * @ofs: offset from device start
463 *
464 * This is the default implementation, which can be overridden by a hardware
465 * specific driver. It provides the details for writing a bad block marker to a
466 * block.
467 */
468 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
469 {
470 struct nand_chip *chip = mtd_to_nand(mtd);
471 struct mtd_oob_ops ops;
472 uint8_t buf[2] = { 0, 0 };
473 int ret = 0, res, i = 0;
474
475 memset(&ops, 0, sizeof(ops));
476 ops.oobbuf = buf;
477 ops.ooboffs = chip->badblockpos;
478 if (chip->options & NAND_BUSWIDTH_16) {
479 ops.ooboffs &= ~0x01;
480 ops.len = ops.ooblen = 2;
481 } else {
482 ops.len = ops.ooblen = 1;
483 }
484 ops.mode = MTD_OPS_PLACE_OOB;
485
486 /* Write to first/last page(s) if necessary */
487 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 ofs += mtd->erasesize - mtd->writesize;
489 do {
490 res = nand_do_write_oob(mtd, ofs, &ops);
491 if (!ret)
492 ret = res;
493
494 i++;
495 ofs += mtd->writesize;
496 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
497
498 return ret;
499 }
500
501 /**
502 * nand_block_markbad_lowlevel - mark a block bad
503 * @mtd: MTD device structure
504 * @ofs: offset from device start
505 *
506 * This function performs the generic NAND bad block marking steps (i.e., bad
507 * block table(s) and/or marker(s)). We only allow the hardware driver to
508 * specify how to write bad block markers to OOB (chip->block_markbad).
509 *
510 * We try operations in the following order:
511 *
512 * (1) erase the affected block, to allow OOB marker to be written cleanly
513 * (2) write bad block marker to OOB area of affected block (unless flag
514 * NAND_BBT_NO_OOB_BBM is present)
515 * (3) update the BBT
516 *
517 * Note that we retain the first error encountered in (2) or (3), finish the
518 * procedures, and dump the error in the end.
519 */
520 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
521 {
522 struct nand_chip *chip = mtd_to_nand(mtd);
523 int res, ret = 0;
524
525 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 struct erase_info einfo;
527
528 /* Attempt erase before marking OOB */
529 memset(&einfo, 0, sizeof(einfo));
530 einfo.mtd = mtd;
531 einfo.addr = ofs;
532 einfo.len = 1ULL << chip->phys_erase_shift;
533 nand_erase_nand(mtd, &einfo, 0);
534
535 /* Write bad block marker to OOB */
536 nand_get_device(mtd, FL_WRITING);
537 ret = chip->block_markbad(mtd, ofs);
538 nand_release_device(mtd);
539 }
540
541 /* Mark block bad in BBT */
542 if (chip->bbt) {
543 res = nand_markbad_bbt(mtd, ofs);
544 if (!ret)
545 ret = res;
546 }
547
548 if (!ret)
549 mtd->ecc_stats.badblocks++;
550
551 return ret;
552 }
553
554 /**
555 * nand_check_wp - [GENERIC] check if the chip is write protected
556 * @mtd: MTD device structure
557 *
558  * Check if the device is write protected. The function expects that the
559 * device is already selected.
560 */
561 static int nand_check_wp(struct mtd_info *mtd)
562 {
563 struct nand_chip *chip = mtd_to_nand(mtd);
564
565 /* Broken xD cards report WP despite being writable */
566 if (chip->options & NAND_BROKEN_XD)
567 return 0;
568
569 /* Check the WP bit */
570 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
571 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
572 }
573
574 /**
575 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
576 * @mtd: MTD device structure
577 * @ofs: offset from device start
578 *
579 * Check if the block is marked as reserved.
580 */
581 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
582 {
583 struct nand_chip *chip = mtd_to_nand(mtd);
584
585 if (!chip->bbt)
586 return 0;
587 /* Return info from the table */
588 return nand_isreserved_bbt(mtd, ofs);
589 }
590
591 /**
592 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
593 * @mtd: MTD device structure
594 * @ofs: offset from device start
595  * @allowbbt: 1, if it is allowed to access the bbt area
596 *
597  * Check if the block is bad, either by reading the bad block table or by
598  * calling the scan function.
599 */
600 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
601 {
602 struct nand_chip *chip = mtd_to_nand(mtd);
603
604 if (!chip->bbt)
605 return chip->block_bad(mtd, ofs);
606
607 /* Return info from the table */
608 return nand_isbad_bbt(mtd, ofs, allowbbt);
609 }
610
611 /**
612 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
613 * @mtd: MTD device structure
614 * @timeo: Timeout
615 *
616 * Helper function for nand_wait_ready used when needing to wait in interrupt
617 * context.
618 */
619 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
620 {
621 struct nand_chip *chip = mtd_to_nand(mtd);
622 int i;
623
624 /* Wait for the device to get ready */
625 for (i = 0; i < timeo; i++) {
626 if (chip->dev_ready(mtd))
627 break;
628 touch_softlockup_watchdog();
629 mdelay(1);
630 }
631 }
632
633 /**
634 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
635 * @mtd: MTD device structure
636 *
637 * Wait for the ready pin after a command, and warn if a timeout occurs.
638 */
639 void nand_wait_ready(struct mtd_info *mtd)
640 {
641 struct nand_chip *chip = mtd_to_nand(mtd);
642 unsigned long timeo = 400;
643
644 if (in_interrupt() || oops_in_progress)
645 return panic_nand_wait_ready(mtd, timeo);
646
647 /* Wait until command is processed or timeout occurs */
648 timeo = jiffies + msecs_to_jiffies(timeo);
649 do {
650 if (chip->dev_ready(mtd))
651 return;
652 cond_resched();
653 } while (time_before(jiffies, timeo));
654
655 if (!chip->dev_ready(mtd))
656 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
657 }
658 EXPORT_SYMBOL_GPL(nand_wait_ready);
659
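/*
 * Illustrative sketch (not part of this file): a minimal dev_ready() callback
 * of the kind polled by nand_wait_ready().  The status register offset and
 * its ready bit are hypothetical and controller specific.
 */
static int __maybe_unused example_dev_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Hypothetical controller mirrors the R/B# line in a status register */
	return readb(chip->IO_ADDR_R + 0x04) & 0x01;
}
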
660 /**
661 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
662 * @mtd: MTD device structure
663 * @timeo: Timeout in ms
664 *
665 * Wait for status ready (i.e. command done) or timeout.
666 */
667 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
668 {
669 register struct nand_chip *chip = mtd_to_nand(mtd);
670
671 timeo = jiffies + msecs_to_jiffies(timeo);
672 do {
673 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
674 break;
675 touch_softlockup_watchdog();
676 } while (time_before(jiffies, timeo));
677 }
678
679 /**
680 * nand_command - [DEFAULT] Send command to NAND device
681 * @mtd: MTD device structure
682 * @command: the command to be sent
683 * @column: the column address for this command, -1 if none
684 * @page_addr: the page address for this command, -1 if none
685 *
686 * Send command to NAND device. This function is used for small page devices
687 * (512 Bytes per page).
688 */
689 static void nand_command(struct mtd_info *mtd, unsigned int command,
690 int column, int page_addr)
691 {
692 register struct nand_chip *chip = mtd_to_nand(mtd);
693 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
694
695 /* Write out the command to the device */
696 if (command == NAND_CMD_SEQIN) {
697 int readcmd;
698
699 if (column >= mtd->writesize) {
700 /* OOB area */
701 column -= mtd->writesize;
702 readcmd = NAND_CMD_READOOB;
703 } else if (column < 256) {
704 /* First 256 bytes --> READ0 */
705 readcmd = NAND_CMD_READ0;
706 } else {
707 column -= 256;
708 readcmd = NAND_CMD_READ1;
709 }
710 chip->cmd_ctrl(mtd, readcmd, ctrl);
711 ctrl &= ~NAND_CTRL_CHANGE;
712 }
713 chip->cmd_ctrl(mtd, command, ctrl);
714
715 /* Address cycle, when necessary */
716 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
717 /* Serially input address */
718 if (column != -1) {
719 /* Adjust columns for 16 bit buswidth */
720 if (chip->options & NAND_BUSWIDTH_16 &&
721 !nand_opcode_8bits(command))
722 column >>= 1;
723 chip->cmd_ctrl(mtd, column, ctrl);
724 ctrl &= ~NAND_CTRL_CHANGE;
725 }
726 if (page_addr != -1) {
727 chip->cmd_ctrl(mtd, page_addr, ctrl);
728 ctrl &= ~NAND_CTRL_CHANGE;
729 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
730 /* One more address cycle for devices > 32MiB */
731 if (chip->chipsize > (32 << 20))
732 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
733 }
734 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
735
736 /*
737  * Program and erase have their own busy handlers; status and sequential
738  * in need no delay.
739 */
740 switch (command) {
741
742 case NAND_CMD_PAGEPROG:
743 case NAND_CMD_ERASE1:
744 case NAND_CMD_ERASE2:
745 case NAND_CMD_SEQIN:
746 case NAND_CMD_STATUS:
747 case NAND_CMD_READID:
748 case NAND_CMD_SET_FEATURES:
749 return;
750
751 case NAND_CMD_RESET:
752 if (chip->dev_ready)
753 break;
754 udelay(chip->chip_delay);
755 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
756 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
757 chip->cmd_ctrl(mtd,
758 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
759                 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
760 nand_wait_status_ready(mtd, 250);
761 return;
762
763 /* This applies to read commands */
764 case NAND_CMD_READ0:
765 /*
766 * READ0 is sometimes used to exit GET STATUS mode. When this
767 * is the case no address cycles are requested, and we can use
768 * this information to detect that we should not wait for the
769 * device to be ready.
770 */
771 if (column == -1 && page_addr == -1)
772 return;
773
774 default:
775 /*
776 * If we don't have access to the busy pin, we apply the given
777 * command delay
778 */
779 if (!chip->dev_ready) {
780 udelay(chip->chip_delay);
781 return;
782 }
783 }
784 /*
785 * Apply this short delay always to ensure that we do wait tWB in
786 * any case on any machine.
787 */
788 ndelay(100);
789
790 nand_wait_ready(mtd);
791 }
792
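/*
 * Illustrative sketch (not part of this file): a simple cmd_ctrl()
 * implementation of the kind driven by nand_command()/nand_command_lp().
 * The CLE/ALE address offsets below are hypothetical; real boards typically
 * wire them to dedicated address lines of the NAND controller.
 */
static void __maybe_unused example_cmd_ctrl(struct mtd_info *mtd, int cmd,
					    unsigned int ctrl)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	void __iomem *addr = chip->IO_ADDR_W;

	if (cmd == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		addr += 0x10;	/* hypothetical: CLE wired to this address bit */
	else if (ctrl & NAND_ALE)
		addr += 0x20;	/* hypothetical: ALE wired to this address bit */

	writeb(cmd, addr);
}
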
793 static void nand_ccs_delay(struct nand_chip *chip)
794 {
795 /*
796 * The controller already takes care of waiting for tCCS when the RNDIN
797 * or RNDOUT command is sent, return directly.
798 */
799 if (!(chip->options & NAND_WAIT_TCCS))
800 return;
801
802 /*
803 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
804 * (which should be safe for all NANDs).
805 */
806 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
807 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
808 else
809 ndelay(500);
810 }
811
812 /**
813 * nand_command_lp - [DEFAULT] Send command to NAND large page device
814 * @mtd: MTD device structure
815 * @command: the command to be sent
816 * @column: the column address for this command, -1 if none
817 * @page_addr: the page address for this command, -1 if none
818 *
819 * Send command to NAND device. This is the version for the new large page
820 * devices. We don't have the separate regions as we have in the small page
821 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
822 */
823 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
824 int column, int page_addr)
825 {
826 register struct nand_chip *chip = mtd_to_nand(mtd);
827
828 /* Emulate NAND_CMD_READOOB */
829 if (command == NAND_CMD_READOOB) {
830 column += mtd->writesize;
831 command = NAND_CMD_READ0;
832 }
833
834 /* Command latch cycle */
835 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
836
837 if (column != -1 || page_addr != -1) {
838 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
839
840 /* Serially input address */
841 if (column != -1) {
842 /* Adjust columns for 16 bit buswidth */
843 if (chip->options & NAND_BUSWIDTH_16 &&
844 !nand_opcode_8bits(command))
845 column >>= 1;
846 chip->cmd_ctrl(mtd, column, ctrl);
847 ctrl &= ~NAND_CTRL_CHANGE;
848
849 /* Only output a single addr cycle for 8bits opcodes. */
850 if (!nand_opcode_8bits(command))
851 chip->cmd_ctrl(mtd, column >> 8, ctrl);
852 }
853 if (page_addr != -1) {
854 chip->cmd_ctrl(mtd, page_addr, ctrl);
855 chip->cmd_ctrl(mtd, page_addr >> 8,
856 NAND_NCE | NAND_ALE);
857 /* One more address cycle for devices > 128MiB */
858 if (chip->chipsize > (128 << 20))
859 chip->cmd_ctrl(mtd, page_addr >> 16,
860 NAND_NCE | NAND_ALE);
861 }
862 }
863 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
864
865 /*
866  * Program and erase have their own busy handlers; status and sequential
867  * in need no delay.
868 */
869 switch (command) {
870
871 case NAND_CMD_CACHEDPROG:
872 case NAND_CMD_PAGEPROG:
873 case NAND_CMD_ERASE1:
874 case NAND_CMD_ERASE2:
875 case NAND_CMD_SEQIN:
876 case NAND_CMD_STATUS:
877 case NAND_CMD_READID:
878 case NAND_CMD_SET_FEATURES:
879 return;
880
881 case NAND_CMD_RNDIN:
882 nand_ccs_delay(chip);
883 return;
884
885 case NAND_CMD_RESET:
886 if (chip->dev_ready)
887 break;
888 udelay(chip->chip_delay);
889 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
890 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
891 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
892 NAND_NCE | NAND_CTRL_CHANGE);
893                 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
894 nand_wait_status_ready(mtd, 250);
895 return;
896
897 case NAND_CMD_RNDOUT:
898 /* No ready / busy check necessary */
899 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
900 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
901 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
902 NAND_NCE | NAND_CTRL_CHANGE);
903
904 nand_ccs_delay(chip);
905 return;
906
907 case NAND_CMD_READ0:
908 /*
909 * READ0 is sometimes used to exit GET STATUS mode. When this
910 * is the case no address cycles are requested, and we can use
911 * this information to detect that READSTART should not be
912 * issued.
913 */
914 if (column == -1 && page_addr == -1)
915 return;
916
917 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
918 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
919 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
920 NAND_NCE | NAND_CTRL_CHANGE);
921
922 /* This applies to read commands */
923 default:
924 /*
925 * If we don't have access to the busy pin, we apply the given
926 * command delay.
927 */
928 if (!chip->dev_ready) {
929 udelay(chip->chip_delay);
930 return;
931 }
932 }
933
934 /*
935 * Apply this short delay always to ensure that we do wait tWB in
936 * any case on any machine.
937 */
938 ndelay(100);
939
940 nand_wait_ready(mtd);
941 }
942
943 /**
944 * panic_nand_get_device - [GENERIC] Get chip for selected access
945 * @chip: the nand chip descriptor
946 * @mtd: MTD device structure
947 * @new_state: the state which is requested
948 *
949 * Used when in panic, no locks are taken.
950 */
951 static void panic_nand_get_device(struct nand_chip *chip,
952 struct mtd_info *mtd, int new_state)
953 {
954 /* Hardware controller shared among independent devices */
955 chip->controller->active = chip;
956 chip->state = new_state;
957 }
958
959 /**
960 * nand_get_device - [GENERIC] Get chip for selected access
961 * @mtd: MTD device structure
962 * @new_state: the state which is requested
963 *
964 * Get the device and lock it for exclusive access
965 */
966 static int
967 nand_get_device(struct mtd_info *mtd, int new_state)
968 {
969 struct nand_chip *chip = mtd_to_nand(mtd);
970 spinlock_t *lock = &chip->controller->lock;
971 wait_queue_head_t *wq = &chip->controller->wq;
972 DECLARE_WAITQUEUE(wait, current);
973 retry:
974 spin_lock(lock);
975
976 /* Hardware controller shared among independent devices */
977 if (!chip->controller->active)
978 chip->controller->active = chip;
979
980 if (chip->controller->active == chip && chip->state == FL_READY) {
981 chip->state = new_state;
982 spin_unlock(lock);
983 return 0;
984 }
985 if (new_state == FL_PM_SUSPENDED) {
986 if (chip->controller->active->state == FL_PM_SUSPENDED) {
987 chip->state = FL_PM_SUSPENDED;
988 spin_unlock(lock);
989 return 0;
990 }
991 }
992 set_current_state(TASK_UNINTERRUPTIBLE);
993 add_wait_queue(wq, &wait);
994 spin_unlock(lock);
995 schedule();
996 remove_wait_queue(wq, &wait);
997 goto retry;
998 }
999
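/*
 * Illustrative sketch (not part of this file): the usual bracket around a
 * chip access, as used by the read, write and erase paths below.
 */
static void __maybe_unused example_locked_access(struct mtd_info *mtd)
{
	nand_get_device(mtd, FL_READING);

	/* ... perform the actual chip access here ... */

	nand_release_device(mtd);
}
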
1000 /**
1001 * panic_nand_wait - [GENERIC] wait until the command is done
1002 * @mtd: MTD device structure
1003 * @chip: NAND chip structure
1004 * @timeo: timeout
1005 *
1006 * Wait for command done. This is a helper function for nand_wait used when
1007 * we are in interrupt context. May happen when in panic and trying to write
1008 * an oops through mtdoops.
1009 */
1010 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1011 unsigned long timeo)
1012 {
1013 int i;
1014 for (i = 0; i < timeo; i++) {
1015 if (chip->dev_ready) {
1016 if (chip->dev_ready(mtd))
1017 break;
1018 } else {
1019 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1020 break;
1021 }
1022 mdelay(1);
1023 }
1024 }
1025
1026 /**
1027 * nand_wait - [DEFAULT] wait until the command is done
1028 * @mtd: MTD device structure
1029 * @chip: NAND chip structure
1030 *
1031 * Wait for command done. This applies to erase and program only.
1032 */
1033 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1034 {
1035
1036 int status;
1037 unsigned long timeo = 400;
1038
1039 /*
1040 * Apply this short delay always to ensure that we do wait tWB in any
1041 * case on any machine.
1042 */
1043 ndelay(100);
1044
1045 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1046
1047 if (in_interrupt() || oops_in_progress)
1048 panic_nand_wait(mtd, chip, timeo);
1049 else {
1050 timeo = jiffies + msecs_to_jiffies(timeo);
1051 do {
1052 if (chip->dev_ready) {
1053 if (chip->dev_ready(mtd))
1054 break;
1055 } else {
1056 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1057 break;
1058 }
1059 cond_resched();
1060 } while (time_before(jiffies, timeo));
1061 }
1062
1063 status = (int)chip->read_byte(mtd);
1064         /* This can happen in case of a timeout or a buggy dev_ready */
1065 WARN_ON(!(status & NAND_STATUS_READY));
1066 return status;
1067 }
1068
1069 /**
1070 * nand_reset_data_interface - Reset data interface and timings
1071 * @chip: The NAND chip
1072 * @chipnr: Internal die id
1073 *
1074 * Reset the Data interface and timings to ONFI mode 0.
1075 *
1076 * Returns 0 for success or negative error code otherwise.
1077 */
1078 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1079 {
1080 struct mtd_info *mtd = nand_to_mtd(chip);
1081 const struct nand_data_interface *conf;
1082 int ret;
1083
1084 if (!chip->setup_data_interface)
1085 return 0;
1086
1087 /*
1088 * The ONFI specification says:
1089 * "
1090 * To transition from NV-DDR or NV-DDR2 to the SDR data
1091 * interface, the host shall use the Reset (FFh) command
1092 * using SDR timing mode 0. A device in any timing mode is
1093 * required to recognize Reset (FFh) command issued in SDR
1094 * timing mode 0.
1095 * "
1096 *
1097 * Configure the data interface in SDR mode and set the
1098 * timings to timing mode 0.
1099 */
1100
1101 conf = nand_get_default_data_interface();
1102 ret = chip->setup_data_interface(mtd, chipnr, conf);
1103 if (ret)
1104 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1105
1106 return ret;
1107 }
1108
1109 /**
1110 * nand_setup_data_interface - Setup the best data interface and timings
1111 * @chip: The NAND chip
1112 * @chipnr: Internal die id
1113 *
1114 * Find and configure the best data interface and NAND timings supported by
1115 * the chip and the driver.
1116 * First tries to retrieve supported timing modes from ONFI information,
1117 * and if the NAND chip does not support ONFI, relies on the
1118 * ->onfi_timing_mode_default specified in the nand_ids table.
1119 *
1120 * Returns 0 for success or negative error code otherwise.
1121 */
1122 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1123 {
1124 struct mtd_info *mtd = nand_to_mtd(chip);
1125 int ret;
1126
1127 if (!chip->setup_data_interface || !chip->data_interface)
1128 return 0;
1129
1130 /*
1131 * Ensure the timing mode has been changed on the chip side
1132 * before changing timings on the controller side.
1133 */
1134 if (chip->onfi_version &&
1135 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1136 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1137 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1138 chip->onfi_timing_mode_default,
1139 };
1140
1141 ret = chip->onfi_set_features(mtd, chip,
1142 ONFI_FEATURE_ADDR_TIMING_MODE,
1143 tmode_param);
1144 if (ret)
1145 goto err;
1146 }
1147
1148 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1149 err:
1150 return ret;
1151 }
1152
1153 /**
1154 * nand_init_data_interface - find the best data interface and timings
1155 * @chip: The NAND chip
1156 *
1157 * Find the best data interface and NAND timings supported by the chip
1158 * and the driver.
1159 * First tries to retrieve supported timing modes from ONFI information,
1160 * and if the NAND chip does not support ONFI, relies on the
1161 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1162 * function nand_chip->data_interface is initialized with the best timing mode
1163 * available.
1164 *
1165 * Returns 0 for success or negative error code otherwise.
1166 */
1167 static int nand_init_data_interface(struct nand_chip *chip)
1168 {
1169 struct mtd_info *mtd = nand_to_mtd(chip);
1170 int modes, mode, ret;
1171
1172 if (!chip->setup_data_interface)
1173 return 0;
1174
1175 /*
1176 * First try to identify the best timings from ONFI parameters and
1177 * if the NAND does not support ONFI, fallback to the default ONFI
1178 * timing mode.
1179 */
1180 modes = onfi_get_async_timing_mode(chip);
1181 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1182 if (!chip->onfi_timing_mode_default)
1183 return 0;
1184
1185 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1186 }
1187
1188 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1189 GFP_KERNEL);
1190 if (!chip->data_interface)
1191 return -ENOMEM;
1192
1193 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1194 ret = onfi_init_data_interface(chip, chip->data_interface,
1195 NAND_SDR_IFACE, mode);
1196 if (ret)
1197 continue;
1198
1199                 /* Pass NAND_DATA_IFACE_CHECK_ONLY to only check whether the timings are supported */
1200 ret = chip->setup_data_interface(mtd,
1201 NAND_DATA_IFACE_CHECK_ONLY,
1202 chip->data_interface);
1203 if (!ret) {
1204 chip->onfi_timing_mode_default = mode;
1205 break;
1206 }
1207 }
1208
1209 return 0;
1210 }
1211
1212 static void nand_release_data_interface(struct nand_chip *chip)
1213 {
1214 kfree(chip->data_interface);
1215 }
1216
1217 /**
1218 * nand_reset - Reset and initialize a NAND device
1219 * @chip: The NAND chip
1220 * @chipnr: Internal die id
1221 *
1222 * Returns 0 for success or negative error code otherwise
1223 */
1224 int nand_reset(struct nand_chip *chip, int chipnr)
1225 {
1226 struct mtd_info *mtd = nand_to_mtd(chip);
1227 int ret;
1228
1229 ret = nand_reset_data_interface(chip, chipnr);
1230 if (ret)
1231 return ret;
1232
1233 /*
1234 * The CS line has to be released before we can apply the new NAND
1235 * interface settings, hence this weird ->select_chip() dance.
1236 */
1237 chip->select_chip(mtd, chipnr);
1238 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1239 chip->select_chip(mtd, -1);
1240
1241 chip->select_chip(mtd, chipnr);
1242 ret = nand_setup_data_interface(chip, chipnr);
1243 chip->select_chip(mtd, -1);
1244 if (ret)
1245 return ret;
1246
1247 return 0;
1248 }
1249
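/*
 * Illustrative sketch (not part of this file): resetting every die of a
 * multi-die package during initialization, one internal die id at a time.
 */
static int __maybe_unused example_reset_all_dies(struct nand_chip *chip)
{
	int i, ret;

	for (i = 0; i < chip->numchips; i++) {
		ret = nand_reset(chip, i);
		if (ret)
			return ret;
	}

	return 0;
}
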
1250 /**
1251 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1252 * @buf: buffer to test
1253 * @len: buffer length
1254 * @bitflips_threshold: maximum number of bitflips
1255 *
1256 * Check if a buffer contains only 0xff, which means the underlying region
1257 * has been erased and is ready to be programmed.
1258  * The bitflips_threshold specifies the maximum number of bitflips before
1259  * considering the region as not erased.
1260  * Note: The logic of this function has been extracted from the memweight
1261  * implementation, except that nand_check_erased_buf exits before testing
1262  * the whole buffer if the number of bitflips exceeds the
1263  * bitflips_threshold value.
1264 *
1265 * Returns a positive number of bitflips less than or equal to
1266 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1267 * threshold.
1268 */
1269 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1270 {
1271 const unsigned char *bitmap = buf;
1272 int bitflips = 0;
1273 int weight;
1274
1275 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1276 len--, bitmap++) {
1277 weight = hweight8(*bitmap);
1278 bitflips += BITS_PER_BYTE - weight;
1279 if (unlikely(bitflips > bitflips_threshold))
1280 return -EBADMSG;
1281 }
1282
1283 for (; len >= sizeof(long);
1284 len -= sizeof(long), bitmap += sizeof(long)) {
1285 unsigned long d = *((unsigned long *)bitmap);
1286 if (d == ~0UL)
1287 continue;
1288 weight = hweight_long(d);
1289 bitflips += BITS_PER_LONG - weight;
1290 if (unlikely(bitflips > bitflips_threshold))
1291 return -EBADMSG;
1292 }
1293
1294 for (; len > 0; len--, bitmap++) {
1295 weight = hweight8(*bitmap);
1296 bitflips += BITS_PER_BYTE - weight;
1297 if (unlikely(bitflips > bitflips_threshold))
1298 return -EBADMSG;
1299 }
1300
1301 return bitflips;
1302 }
1303
1304 /**
1305 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1306 * 0xff data
1307 * @data: data buffer to test
1308 * @datalen: data length
1309 * @ecc: ECC buffer
1310 * @ecclen: ECC length
1311 * @extraoob: extra OOB buffer
1312 * @extraooblen: extra OOB length
1313 * @bitflips_threshold: maximum number of bitflips
1314 *
1315 * Check if a data buffer and its associated ECC and OOB data contains only
1316 * 0xff pattern, which means the underlying region has been erased and is
1317 * ready to be programmed.
1318  * The bitflips_threshold specifies the maximum number of bitflips before
1319 * considering the region as not erased.
1320 *
1321 * Note:
1322  * 1/ ECC algorithms work on pre-defined block sizes which are usually
1323  * different from the NAND page size. When fixing bitflips, ECC engines will
1324  * report the number of errors per chunk, and the NAND core infrastructure
1325  * expects you to return the maximum number of bitflips for the whole page.
1326 * This is why you should always use this function on a single chunk and
1327 * not on the whole page. After checking each chunk you should update your
1328 * max_bitflips value accordingly.
1329 * 2/ When checking for bitflips in erased pages you should not only check
1330 * the payload data but also their associated ECC data, because a user might
1331  * have programmed all but a few bits to 1. In this case, we
1332  * shouldn't consider the chunk as erased, and checking the ECC bytes prevents
1333 * this case.
1334 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1335 * data are protected by the ECC engine.
1336 * It could also be used if you support subpages and want to attach some
1337 * extra OOB data to an ECC chunk.
1338 *
1339 * Returns a positive number of bitflips less than or equal to
1340 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1341 * threshold. In case of success, the passed buffers are filled with 0xff.
1342 */
1343 int nand_check_erased_ecc_chunk(void *data, int datalen,
1344 void *ecc, int ecclen,
1345 void *extraoob, int extraooblen,
1346 int bitflips_threshold)
1347 {
1348 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1349
1350 data_bitflips = nand_check_erased_buf(data, datalen,
1351 bitflips_threshold);
1352 if (data_bitflips < 0)
1353 return data_bitflips;
1354
1355 bitflips_threshold -= data_bitflips;
1356
1357 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1358 if (ecc_bitflips < 0)
1359 return ecc_bitflips;
1360
1361 bitflips_threshold -= ecc_bitflips;
1362
1363 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1364 bitflips_threshold);
1365 if (extraoob_bitflips < 0)
1366 return extraoob_bitflips;
1367
1368 if (data_bitflips)
1369 memset(data, 0xff, datalen);
1370
1371 if (ecc_bitflips)
1372 memset(ecc, 0xff, ecclen);
1373
1374 if (extraoob_bitflips)
1375 memset(extraoob, 0xff, extraooblen);
1376
1377 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1378 }
1379 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
1380
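/*
 * Illustrative sketch (not part of this file): the per-chunk pattern
 * described in notes 1/ and 2/ above, mirroring the page read helpers later
 * in this file.  A negative return still means an uncorrectable chunk; a
 * positive one is the bitflip count to fold into max_bitflips.
 */
static int __maybe_unused example_correct_chunk(struct mtd_info *mtd,
						struct nand_chip *chip,
						uint8_t *p, uint8_t *ecc_code,
						uint8_t *ecc_calc)
{
	int stat = chip->ecc.correct(mtd, p, ecc_code, ecc_calc);

	if (stat == -EBADMSG &&
	    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK))
		/* Maybe the chunk is simply erased with a few bitflips */
		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						   ecc_code, chip->ecc.bytes,
						   NULL, 0,
						   chip->ecc.strength);

	return stat;
}
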
1381 /**
1382 * nand_read_page_raw - [INTERN] read raw page data without ecc
1383 * @mtd: mtd info structure
1384 * @chip: nand chip info structure
1385 * @buf: buffer to store read data
1386 * @oob_required: caller requires OOB data read to chip->oob_poi
1387 * @page: page number to read
1388 *
1389 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1390 */
1391 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1392 uint8_t *buf, int oob_required, int page)
1393 {
1394 chip->read_buf(mtd, buf, mtd->writesize);
1395 if (oob_required)
1396 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1397 return 0;
1398 }
1399 EXPORT_SYMBOL(nand_read_page_raw);
1400
1401 /**
1402 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1403 * @mtd: mtd info structure
1404 * @chip: nand chip info structure
1405 * @buf: buffer to store read data
1406 * @oob_required: caller requires OOB data read to chip->oob_poi
1407 * @page: page number to read
1408 *
1409 * We need a special oob layout and handling even when OOB isn't used.
1410 */
1411 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1412 struct nand_chip *chip, uint8_t *buf,
1413 int oob_required, int page)
1414 {
1415 int eccsize = chip->ecc.size;
1416 int eccbytes = chip->ecc.bytes;
1417 uint8_t *oob = chip->oob_poi;
1418 int steps, size;
1419
1420 for (steps = chip->ecc.steps; steps > 0; steps--) {
1421 chip->read_buf(mtd, buf, eccsize);
1422 buf += eccsize;
1423
1424 if (chip->ecc.prepad) {
1425 chip->read_buf(mtd, oob, chip->ecc.prepad);
1426 oob += chip->ecc.prepad;
1427 }
1428
1429 chip->read_buf(mtd, oob, eccbytes);
1430 oob += eccbytes;
1431
1432 if (chip->ecc.postpad) {
1433 chip->read_buf(mtd, oob, chip->ecc.postpad);
1434 oob += chip->ecc.postpad;
1435 }
1436 }
1437
1438 size = mtd->oobsize - (oob - chip->oob_poi);
1439 if (size)
1440 chip->read_buf(mtd, oob, size);
1441
1442 return 0;
1443 }
1444
1445 /**
1446 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1447 * @mtd: mtd info structure
1448 * @chip: nand chip info structure
1449 * @buf: buffer to store read data
1450 * @oob_required: caller requires OOB data read to chip->oob_poi
1451 * @page: page number to read
1452 */
1453 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1454 uint8_t *buf, int oob_required, int page)
1455 {
1456 int i, eccsize = chip->ecc.size, ret;
1457 int eccbytes = chip->ecc.bytes;
1458 int eccsteps = chip->ecc.steps;
1459 uint8_t *p = buf;
1460 uint8_t *ecc_calc = chip->buffers->ecccalc;
1461 uint8_t *ecc_code = chip->buffers->ecccode;
1462 unsigned int max_bitflips = 0;
1463
1464 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1465
1466 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1467 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1468
1469 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1470 chip->ecc.total);
1471 if (ret)
1472 return ret;
1473
1474 eccsteps = chip->ecc.steps;
1475 p = buf;
1476
1477 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1478 int stat;
1479
1480 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1481 if (stat < 0) {
1482 mtd->ecc_stats.failed++;
1483 } else {
1484 mtd->ecc_stats.corrected += stat;
1485 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1486 }
1487 }
1488 return max_bitflips;
1489 }
1490
1491 /**
1492 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1493 * @mtd: mtd info structure
1494 * @chip: nand chip info structure
1495 * @data_offs: offset of requested data within the page
1496 * @readlen: data length
1497 * @bufpoi: buffer to store read data
1498 * @page: page number to read
1499 */
1500 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1501 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1502 int page)
1503 {
1504 int start_step, end_step, num_steps, ret;
1505 uint8_t *p;
1506 int data_col_addr, i, gaps = 0;
1507 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1508 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1509 int index, section = 0;
1510 unsigned int max_bitflips = 0;
1511 struct mtd_oob_region oobregion = { };
1512
1513         /* Column address within the page aligned to ECC size (256 bytes) */
1514 start_step = data_offs / chip->ecc.size;
1515 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1516 num_steps = end_step - start_step + 1;
1517 index = start_step * chip->ecc.bytes;
1518
1519 /* Data size aligned to ECC ecc.size */
1520 datafrag_len = num_steps * chip->ecc.size;
1521 eccfrag_len = num_steps * chip->ecc.bytes;
1522
1523 data_col_addr = start_step * chip->ecc.size;
1524         /* If the read is not page aligned */
1525 if (data_col_addr != 0)
1526 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1527
1528 p = bufpoi + data_col_addr;
1529 chip->read_buf(mtd, p, datafrag_len);
1530
1531 /* Calculate ECC */
1532 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1533 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1534
1535 /*
1536          * Performance is better if we position offsets according to
1537 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1538 */
1539 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1540 if (ret)
1541 return ret;
1542
1543 if (oobregion.length < eccfrag_len)
1544 gaps = 1;
1545
1546 if (gaps) {
1547 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1549 } else {
1550 /*
1551                  * Send the command to read the particular ECC bytes; take care
1552                  * of buswidth alignment in read_buf.
1553 */
1554 aligned_pos = oobregion.offset & ~(busw - 1);
1555 aligned_len = eccfrag_len;
1556 if (oobregion.offset & (busw - 1))
1557 aligned_len++;
1558 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1559 (busw - 1))
1560 aligned_len++;
1561
1562 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1563 mtd->writesize + aligned_pos, -1);
1564 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1565 }
1566
1567 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1568 chip->oob_poi, index, eccfrag_len);
1569 if (ret)
1570 return ret;
1571
1572 p = bufpoi + data_col_addr;
1573 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1574 int stat;
1575
1576 stat = chip->ecc.correct(mtd, p,
1577 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1578 if (stat == -EBADMSG &&
1579 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1580 /* check for empty pages with bitflips */
1581 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1582 &chip->buffers->ecccode[i],
1583 chip->ecc.bytes,
1584 NULL, 0,
1585 chip->ecc.strength);
1586 }
1587
1588 if (stat < 0) {
1589 mtd->ecc_stats.failed++;
1590 } else {
1591 mtd->ecc_stats.corrected += stat;
1592 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1593 }
1594 }
1595 return max_bitflips;
1596 }
1597
1598 /**
1599 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1600 * @mtd: mtd info structure
1601 * @chip: nand chip info structure
1602 * @buf: buffer to store read data
1603 * @oob_required: caller requires OOB data read to chip->oob_poi
1604 * @page: page number to read
1605 *
1606 * Not for syndrome calculating ECC controllers which need a special oob layout.
1607 */
1608 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1609 uint8_t *buf, int oob_required, int page)
1610 {
1611 int i, eccsize = chip->ecc.size, ret;
1612 int eccbytes = chip->ecc.bytes;
1613 int eccsteps = chip->ecc.steps;
1614 uint8_t *p = buf;
1615 uint8_t *ecc_calc = chip->buffers->ecccalc;
1616 uint8_t *ecc_code = chip->buffers->ecccode;
1617 unsigned int max_bitflips = 0;
1618
1619 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1620 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1621 chip->read_buf(mtd, p, eccsize);
1622 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1623 }
1624 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1625
1626 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1627 chip->ecc.total);
1628 if (ret)
1629 return ret;
1630
1631 eccsteps = chip->ecc.steps;
1632 p = buf;
1633
1634 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1635 int stat;
1636
1637 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1638 if (stat == -EBADMSG &&
1639 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1640 /* check for empty pages with bitflips */
1641 stat = nand_check_erased_ecc_chunk(p, eccsize,
1642 &ecc_code[i], eccbytes,
1643 NULL, 0,
1644 chip->ecc.strength);
1645 }
1646
1647 if (stat < 0) {
1648 mtd->ecc_stats.failed++;
1649 } else {
1650 mtd->ecc_stats.corrected += stat;
1651 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1652 }
1653 }
1654 return max_bitflips;
1655 }
1656
1657 /**
1658 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1659 * @mtd: mtd info structure
1660 * @chip: nand chip info structure
1661 * @buf: buffer to store read data
1662 * @oob_required: caller requires OOB data read to chip->oob_poi
1663 * @page: page number to read
1664 *
1665  * Hardware ECC for large page chips which requires the OOB to be read first.
1666  * For this ECC mode, the write_page method is re-used from ECC_HW. These
1667  * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
1668  * support which, with multiple ECC steps, follows the "infix ECC" scheme and
1669  * reads/writes ECC from the data area, overwriting the NAND manufacturer bad block markings.
1670 */
1671 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1672 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1673 {
1674 int i, eccsize = chip->ecc.size, ret;
1675 int eccbytes = chip->ecc.bytes;
1676 int eccsteps = chip->ecc.steps;
1677 uint8_t *p = buf;
1678 uint8_t *ecc_code = chip->buffers->ecccode;
1679 uint8_t *ecc_calc = chip->buffers->ecccalc;
1680 unsigned int max_bitflips = 0;
1681
1682 /* Read the OOB area first */
1683 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1684 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1685 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1686
1687 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1688 chip->ecc.total);
1689 if (ret)
1690 return ret;
1691
1692 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1693 int stat;
1694
1695 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1696 chip->read_buf(mtd, p, eccsize);
1697 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1698
1699 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1700 if (stat == -EBADMSG &&
1701 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1702 /* check for empty pages with bitflips */
1703 stat = nand_check_erased_ecc_chunk(p, eccsize,
1704 &ecc_code[i], eccbytes,
1705 NULL, 0,
1706 chip->ecc.strength);
1707 }
1708
1709 if (stat < 0) {
1710 mtd->ecc_stats.failed++;
1711 } else {
1712 mtd->ecc_stats.corrected += stat;
1713 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1714 }
1715 }
1716 return max_bitflips;
1717 }
1718
1719 /**
1720 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1721 * @mtd: mtd info structure
1722 * @chip: nand chip info structure
1723 * @buf: buffer to store read data
1724 * @oob_required: caller requires OOB data read to chip->oob_poi
1725 * @page: page number to read
1726 *
1727 * The hw generator calculates the error syndrome automatically. Therefore we
1728 * need a special oob layout and handling.
1729 */
1730 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1731 uint8_t *buf, int oob_required, int page)
1732 {
1733 int i, eccsize = chip->ecc.size;
1734 int eccbytes = chip->ecc.bytes;
1735 int eccsteps = chip->ecc.steps;
1736 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1737 uint8_t *p = buf;
1738 uint8_t *oob = chip->oob_poi;
1739 unsigned int max_bitflips = 0;
1740
1741 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1742 int stat;
1743
1744 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1745 chip->read_buf(mtd, p, eccsize);
1746
1747 if (chip->ecc.prepad) {
1748 chip->read_buf(mtd, oob, chip->ecc.prepad);
1749 oob += chip->ecc.prepad;
1750 }
1751
1752 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1753 chip->read_buf(mtd, oob, eccbytes);
1754 stat = chip->ecc.correct(mtd, p, oob, NULL);
1755
1756 oob += eccbytes;
1757
1758 if (chip->ecc.postpad) {
1759 chip->read_buf(mtd, oob, chip->ecc.postpad);
1760 oob += chip->ecc.postpad;
1761 }
1762
1763 if (stat == -EBADMSG &&
1764 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1765 /* check for empty pages with bitflips */
1766 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1767 oob - eccpadbytes,
1768 eccpadbytes,
1769 NULL, 0,
1770 chip->ecc.strength);
1771 }
1772
1773 if (stat < 0) {
1774 mtd->ecc_stats.failed++;
1775 } else {
1776 mtd->ecc_stats.corrected += stat;
1777 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1778 }
1779 }
1780
1781 /* Calculate remaining oob bytes */
1782 i = mtd->oobsize - (oob - chip->oob_poi);
1783 if (i)
1784 chip->read_buf(mtd, oob, i);
1785
1786 return max_bitflips;
1787 }
1788
1789 /**
1790 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1791 * @mtd: mtd info structure
1792 * @oob: oob destination address
1793 * @ops: oob ops structure
1794 * @len: size of oob to transfer
1795 */
1796 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1797 struct mtd_oob_ops *ops, size_t len)
1798 {
1799 struct nand_chip *chip = mtd_to_nand(mtd);
1800 int ret;
1801
1802 switch (ops->mode) {
1803
1804 case MTD_OPS_PLACE_OOB:
1805 case MTD_OPS_RAW:
1806 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1807 return oob + len;
1808
1809 case MTD_OPS_AUTO_OOB:
1810 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1811 ops->ooboffs, len);
1812 BUG_ON(ret);
1813 return oob + len;
1814
1815 default:
1816 BUG();
1817 }
1818 return NULL;
1819 }
1820
1821 /**
1822 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1823 * @mtd: MTD device structure
1824 * @retry_mode: the retry mode to use
1825 *
1826 * Some vendors supply a special command to shift the Vt threshold, to be used
1827 * when there are too many bitflips in a page (i.e., ECC error). After setting
1828 * a new threshold, the host should retry reading the page.
1829 */
1830 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1831 {
1832 struct nand_chip *chip = mtd_to_nand(mtd);
1833
1834 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1835
1836 if (retry_mode >= chip->read_retries)
1837 return -EINVAL;
1838
1839 if (!chip->setup_read_retry)
1840 return -EOPNOTSUPP;
1841
1842 return chip->setup_read_retry(mtd, retry_mode);
1843 }
1844
1845 /**
1846 * nand_do_read_ops - [INTERN] Read data with ECC
1847 * @mtd: MTD device structure
1848 * @from: offset to read from
1849 * @ops: oob ops structure
1850 *
1851 * Internal function. Called with chip held.
1852 */
1853 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1854 struct mtd_oob_ops *ops)
1855 {
1856 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1857 struct nand_chip *chip = mtd_to_nand(mtd);
1858 int ret = 0;
1859 uint32_t readlen = ops->len;
1860 uint32_t oobreadlen = ops->ooblen;
1861 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1862
1863 uint8_t *bufpoi, *oob, *buf;
1864 int use_bufpoi;
1865 unsigned int max_bitflips = 0;
1866 int retry_mode = 0;
1867 bool ecc_fail = false;
1868
1869 chipnr = (int)(from >> chip->chip_shift);
1870 chip->select_chip(mtd, chipnr);
1871
1872 realpage = (int)(from >> chip->page_shift);
1873 page = realpage & chip->pagemask;
1874
1875 col = (int)(from & (mtd->writesize - 1));
1876
1877 buf = ops->datbuf;
1878 oob = ops->oobbuf;
1879 oob_required = oob ? 1 : 0;
1880
1881 while (1) {
1882 unsigned int ecc_failures = mtd->ecc_stats.failed;
1883
1884 bytes = min(mtd->writesize - col, readlen);
1885 aligned = (bytes == mtd->writesize);
1886
1887 if (!aligned)
1888 use_bufpoi = 1;
1889 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1890 use_bufpoi = !virt_addr_valid(buf) ||
1891 !IS_ALIGNED((unsigned long)buf,
1892 chip->buf_align);
1893 else
1894 use_bufpoi = 0;
1895
1896 /* Is the current page in the buffer? */
1897 if (realpage != chip->pagebuf || oob) {
1898 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1899
1900 if (use_bufpoi && aligned)
1901 pr_debug("%s: using read bounce buffer for buf@%p\n",
1902 __func__, buf);
1903
1904 read_retry:
1905 if (nand_standard_page_accessors(&chip->ecc))
1906 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1907
1908 /*
1909 * Now read the page into the buffer. Absent an error,
1910 * the read methods return max bitflips per ecc step.
1911 */
1912 if (unlikely(ops->mode == MTD_OPS_RAW))
1913 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1914 oob_required,
1915 page);
1916 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1917 !oob)
1918 ret = chip->ecc.read_subpage(mtd, chip,
1919 col, bytes, bufpoi,
1920 page);
1921 else
1922 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1923 oob_required, page);
1924 if (ret < 0) {
1925 if (use_bufpoi)
1926 /* Invalidate page cache */
1927 chip->pagebuf = -1;
1928 break;
1929 }
1930
1931 /* Transfer unaligned data */
1932 if (use_bufpoi) {
1933 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1934 !(mtd->ecc_stats.failed - ecc_failures) &&
1935 (ops->mode != MTD_OPS_RAW)) {
1936 chip->pagebuf = realpage;
1937 chip->pagebuf_bitflips = ret;
1938 } else {
1939 /* Invalidate page cache */
1940 chip->pagebuf = -1;
1941 }
1942 memcpy(buf, chip->buffers->databuf + col, bytes);
1943 }
1944
1945 if (unlikely(oob)) {
1946 int toread = min(oobreadlen, max_oobsize);
1947
1948 if (toread) {
1949 oob = nand_transfer_oob(mtd,
1950 oob, ops, toread);
1951 oobreadlen -= toread;
1952 }
1953 }
1954
1955 if (chip->options & NAND_NEED_READRDY) {
1956 /* Apply delay or wait for ready/busy pin */
1957 if (!chip->dev_ready)
1958 udelay(chip->chip_delay);
1959 else
1960 nand_wait_ready(mtd);
1961 }
1962
1963 if (mtd->ecc_stats.failed - ecc_failures) {
1964 if (retry_mode + 1 < chip->read_retries) {
1965 retry_mode++;
1966 ret = nand_setup_read_retry(mtd,
1967 retry_mode);
1968 if (ret < 0)
1969 break;
1970
1971 /* Reset failures; retry */
1972 mtd->ecc_stats.failed = ecc_failures;
1973 goto read_retry;
1974 } else {
1975 /* No more retry modes; real failure */
1976 ecc_fail = true;
1977 }
1978 }
1979
1980 buf += bytes;
1981 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1982 } else {
1983 memcpy(buf, chip->buffers->databuf + col, bytes);
1984 buf += bytes;
1985 max_bitflips = max_t(unsigned int, max_bitflips,
1986 chip->pagebuf_bitflips);
1987 }
1988
1989 readlen -= bytes;
1990
1991 /* Reset to retry mode 0 */
1992 if (retry_mode) {
1993 ret = nand_setup_read_retry(mtd, 0);
1994 if (ret < 0)
1995 break;
1996 retry_mode = 0;
1997 }
1998
1999 if (!readlen)
2000 break;
2001
2002 /* For subsequent reads align to page boundary */
2003 col = 0;
2004 /* Increment page address */
2005 realpage++;
2006
2007 page = realpage & chip->pagemask;
2008 /* Check, if we cross a chip boundary */
2009 if (!page) {
2010 chipnr++;
2011 chip->select_chip(mtd, -1);
2012 chip->select_chip(mtd, chipnr);
2013 }
2014 }
2015 chip->select_chip(mtd, -1);
2016
2017 ops->retlen = ops->len - (size_t) readlen;
2018 if (oob)
2019 ops->oobretlen = ops->ooblen - oobreadlen;
2020
2021 if (ret < 0)
2022 return ret;
2023
2024 if (ecc_fail)
2025 return -EBADMSG;
2026
2027 return max_bitflips;
2028 }
2029
2030 /**
2031 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2032 * @mtd: MTD device structure
2033 * @from: offset to read from
2034 * @len: number of bytes to read
2035 * @retlen: pointer to variable to store the number of read bytes
2036 * @buf: the databuffer to put data
2037 *
2038 * Get hold of the chip and call nand_do_read_ops().
2039 */
2040 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2041 size_t *retlen, uint8_t *buf)
2042 {
2043 struct mtd_oob_ops ops;
2044 int ret;
2045
2046 nand_get_device(mtd, FL_READING);
2047 memset(&ops, 0, sizeof(ops));
2048 ops.len = len;
2049 ops.datbuf = buf;
2050 ops.mode = MTD_OPS_PLACE_OOB;
2051 ret = nand_do_read_ops(mtd, from, &ops);
2052 *retlen = ops.retlen;
2053 nand_release_device(mtd);
2054 return ret;
2055 }
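
/*
 * Note on the return value: on success nand_do_read_ops() returns the
 * maximum number of bitflips seen in any single ECC step, not 0.  The
 * mtd_read() wrapper in mtdcore is what converts this into the value MTD
 * users see; roughly (paraphrased):
 *
 *	if (ret_code < 0)
 *		return ret_code;
 *	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
 */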
2056
2057 /**
2058 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2059 * @mtd: mtd info structure
2060 * @chip: nand chip info structure
2061 * @page: page number to read
2062 */
2063 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2064 {
2065 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2066 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2067 return 0;
2068 }
2069 EXPORT_SYMBOL(nand_read_oob_std);
2070
2071 /**
2072 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2073 * with syndromes
2074 * @mtd: mtd info structure
2075 * @chip: nand chip info structure
2076 * @page: page number to read
2077 */
2078 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2079 int page)
2080 {
2081 int length = mtd->oobsize;
2082 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2083 int eccsize = chip->ecc.size;
2084 uint8_t *bufpoi = chip->oob_poi;
2085 int i, toread, sndrnd = 0, pos;
2086
2087 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2088 for (i = 0; i < chip->ecc.steps; i++) {
2089 if (sndrnd) {
2090 pos = eccsize + i * (eccsize + chunk);
2091 if (mtd->writesize > 512)
2092 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2093 else
2094 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2095 } else
2096 sndrnd = 1;
2097 toread = min_t(int, length, chunk);
2098 chip->read_buf(mtd, bufpoi, toread);
2099 bufpoi += toread;
2100 length -= toread;
2101 }
2102 if (length > 0)
2103 chip->read_buf(mtd, bufpoi, length);
2104
2105 return 0;
2106 }
2107 EXPORT_SYMBOL(nand_read_oob_syndrome);
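
/*
 * Worked example with a hypothetical syndrome layout: eccsize = 512,
 * ecc.bytes = 8, prepad = postpad = 0 (so chunk = 8), writesize = 2048,
 * oobsize = 64.  The initial READ0 above puts the column at 512, right
 * behind the first data chunk, and each following step repositions with
 * RNDOUT to
 *
 *	pos = eccsize + i * (eccsize + chunk)	i = 1: 1032, i = 2: 1552, ...
 *
 * After the four 8-byte chunks the remaining 32 OOB bytes are read in one
 * go by the final read_buf().
 */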
2108
2109 /**
2110 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2111 * @mtd: mtd info structure
2112 * @chip: nand chip info structure
2113 * @page: page number to write
2114 */
2115 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2116 {
2117 int status = 0;
2118 const uint8_t *buf = chip->oob_poi;
2119 int length = mtd->oobsize;
2120
2121 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2122 chip->write_buf(mtd, buf, length);
2123 /* Send command to program the OOB data */
2124 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2125
2126 status = chip->waitfunc(mtd, chip);
2127
2128 return status & NAND_STATUS_FAIL ? -EIO : 0;
2129 }
2130 EXPORT_SYMBOL(nand_write_oob_std);
2131
2132 /**
2133 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2134 * with syndrome - only for large page flash
2135 * @mtd: mtd info structure
2136 * @chip: nand chip info structure
2137 * @page: page number to write
2138 */
2139 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2140 int page)
2141 {
2142 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2143 int eccsize = chip->ecc.size, length = mtd->oobsize;
2144 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2145 const uint8_t *bufpoi = chip->oob_poi;
2146
2147 /*
2148 * data-ecc-data-ecc ... ecc-oob
2149 * or
2150 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2151 */
2152 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2153 pos = steps * (eccsize + chunk);
2154 steps = 0;
2155 } else
2156 pos = eccsize;
2157
2158 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2159 for (i = 0; i < steps; i++) {
2160 if (sndcmd) {
2161 if (mtd->writesize <= 512) {
2162 uint32_t fill = 0xFFFFFFFF;
2163
2164 len = eccsize;
2165 while (len > 0) {
2166 int num = min_t(int, len, 4);
2167 chip->write_buf(mtd, (uint8_t *)&fill,
2168 num);
2169 len -= num;
2170 }
2171 } else {
2172 pos = eccsize + i * (eccsize + chunk);
2173 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2174 }
2175 } else
2176 sndcmd = 1;
2177 len = min_t(int, length, chunk);
2178 chip->write_buf(mtd, bufpoi, len);
2179 bufpoi += len;
2180 length -= len;
2181 }
2182 if (length > 0)
2183 chip->write_buf(mtd, bufpoi, length);
2184
2185 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2186 status = chip->waitfunc(mtd, chip);
2187
2188 return status & NAND_STATUS_FAIL ? -EIO : 0;
2189 }
2190 EXPORT_SYMBOL(nand_write_oob_syndrome);
2191
2192 /**
2193 * nand_do_read_oob - [INTERN] NAND read out-of-band
2194 * @mtd: MTD device structure
2195 * @from: offset to read from
2196 * @ops: oob operations description structure
2197 *
2198 * NAND read out-of-band data from the spare area.
2199 */
2200 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2201 struct mtd_oob_ops *ops)
2202 {
2203 int page, realpage, chipnr;
2204 struct nand_chip *chip = mtd_to_nand(mtd);
2205 struct mtd_ecc_stats stats;
2206 int readlen = ops->ooblen;
2207 int len;
2208 uint8_t *buf = ops->oobbuf;
2209 int ret = 0;
2210
2211 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2212 __func__, (unsigned long long)from, readlen);
2213
2214 stats = mtd->ecc_stats;
2215
2216 len = mtd_oobavail(mtd, ops);
2217
2218 if (unlikely(ops->ooboffs >= len)) {
2219 pr_debug("%s: attempt to start read outside oob\n",
2220 __func__);
2221 return -EINVAL;
2222 }
2223
2224 /* Do not allow reads past end of device */
2225 if (unlikely(from >= mtd->size ||
2226 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2227 (from >> chip->page_shift)) * len)) {
2228 pr_debug("%s: attempt to read beyond end of device\n",
2229 __func__);
2230 return -EINVAL;
2231 }
2232
2233 chipnr = (int)(from >> chip->chip_shift);
2234 chip->select_chip(mtd, chipnr);
2235
2236 /* Shift to get page */
2237 realpage = (int)(from >> chip->page_shift);
2238 page = realpage & chip->pagemask;
2239
2240 while (1) {
2241 if (ops->mode == MTD_OPS_RAW)
2242 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2243 else
2244 ret = chip->ecc.read_oob(mtd, chip, page);
2245
2246 if (ret < 0)
2247 break;
2248
2249 len = min(len, readlen);
2250 buf = nand_transfer_oob(mtd, buf, ops, len);
2251
2252 if (chip->options & NAND_NEED_READRDY) {
2253 /* Apply delay or wait for ready/busy pin */
2254 if (!chip->dev_ready)
2255 udelay(chip->chip_delay);
2256 else
2257 nand_wait_ready(mtd);
2258 }
2259
2260 readlen -= len;
2261 if (!readlen)
2262 break;
2263
2264 /* Increment page address */
2265 realpage++;
2266
2267 page = realpage & chip->pagemask;
2268 /* Check, if we cross a chip boundary */
2269 if (!page) {
2270 chipnr++;
2271 chip->select_chip(mtd, -1);
2272 chip->select_chip(mtd, chipnr);
2273 }
2274 }
2275 chip->select_chip(mtd, -1);
2276
2277 ops->oobretlen = ops->ooblen - readlen;
2278
2279 if (ret < 0)
2280 return ret;
2281
2282 if (mtd->ecc_stats.failed - stats.failed)
2283 return -EBADMSG;
2284
2285 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2286 }
2287
2288 /**
2289 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2290 * @mtd: MTD device structure
2291 * @from: offset to read from
2292 * @ops: oob operation description structure
2293 *
2294 * NAND read data and/or out-of-band data.
2295 */
2296 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2297 struct mtd_oob_ops *ops)
2298 {
2299 int ret;
2300
2301 ops->retlen = 0;
2302
2303 /* Do not allow reads past end of device */
2304 if (ops->datbuf && (from + ops->len) > mtd->size) {
2305 pr_debug("%s: attempt to read beyond end of device\n",
2306 __func__);
2307 return -EINVAL;
2308 }
2309
2310 if (ops->mode != MTD_OPS_PLACE_OOB &&
2311 ops->mode != MTD_OPS_AUTO_OOB &&
2312 ops->mode != MTD_OPS_RAW)
2313 return -ENOTSUPP;
2314
2315 nand_get_device(mtd, FL_READING);
2316
2317 if (!ops->datbuf)
2318 ret = nand_do_read_oob(mtd, from, ops);
2319 else
2320 ret = nand_do_read_ops(mtd, from, ops);
2321
2322 nand_release_device(mtd);
2323 return ret;
2324 }
2325
2326
2327 /**
2328 * nand_write_page_raw - [INTERN] raw page write function
2329 * @mtd: mtd info structure
2330 * @chip: nand chip info structure
2331 * @buf: data buffer
2332 * @oob_required: must write chip->oob_poi to OOB
2333 * @page: page number to write
2334 *
2335 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2336 */
2337 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2338 const uint8_t *buf, int oob_required, int page)
2339 {
2340 chip->write_buf(mtd, buf, mtd->writesize);
2341 if (oob_required)
2342 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2343
2344 return 0;
2345 }
2346 EXPORT_SYMBOL(nand_write_page_raw);
2347
2348 /**
2349 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2350 * @mtd: mtd info structure
2351 * @chip: nand chip info structure
2352 * @buf: data buffer
2353 * @oob_required: must write chip->oob_poi to OOB
2354 * @page: page number to write
2355 *
2356 * We need a special oob layout and handling even when ECC isn't checked.
2357 */
2358 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2359 struct nand_chip *chip,
2360 const uint8_t *buf, int oob_required,
2361 int page)
2362 {
2363 int eccsize = chip->ecc.size;
2364 int eccbytes = chip->ecc.bytes;
2365 uint8_t *oob = chip->oob_poi;
2366 int steps, size;
2367
2368 for (steps = chip->ecc.steps; steps > 0; steps--) {
2369 chip->write_buf(mtd, buf, eccsize);
2370 buf += eccsize;
2371
2372 if (chip->ecc.prepad) {
2373 chip->write_buf(mtd, oob, chip->ecc.prepad);
2374 oob += chip->ecc.prepad;
2375 }
2376
2377 chip->write_buf(mtd, oob, eccbytes);
2378 oob += eccbytes;
2379
2380 if (chip->ecc.postpad) {
2381 chip->write_buf(mtd, oob, chip->ecc.postpad);
2382 oob += chip->ecc.postpad;
2383 }
2384 }
2385
2386 size = mtd->oobsize - (oob - chip->oob_poi);
2387 if (size)
2388 chip->write_buf(mtd, oob, size);
2389
2390 return 0;
2391 }
2392 /**
2393 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2394 * @mtd: mtd info structure
2395 * @chip: nand chip info structure
2396 * @buf: data buffer
2397 * @oob_required: must write chip->oob_poi to OOB
2398 * @page: page number to write
2399 */
2400 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2401 const uint8_t *buf, int oob_required,
2402 int page)
2403 {
2404 int i, eccsize = chip->ecc.size, ret;
2405 int eccbytes = chip->ecc.bytes;
2406 int eccsteps = chip->ecc.steps;
2407 uint8_t *ecc_calc = chip->buffers->ecccalc;
2408 const uint8_t *p = buf;
2409
2410 /* Software ECC calculation */
2411 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2412 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2413
2414 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2415 chip->ecc.total);
2416 if (ret)
2417 return ret;
2418
2419 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2420 }
2421
2422 /**
2423 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2424 * @mtd: mtd info structure
2425 * @chip: nand chip info structure
2426 * @buf: data buffer
2427 * @oob_required: must write chip->oob_poi to OOB
2428 * @page: page number to write
2429 */
2430 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2431 const uint8_t *buf, int oob_required,
2432 int page)
2433 {
2434 int i, eccsize = chip->ecc.size, ret;
2435 int eccbytes = chip->ecc.bytes;
2436 int eccsteps = chip->ecc.steps;
2437 uint8_t *ecc_calc = chip->buffers->ecccalc;
2438 const uint8_t *p = buf;
2439
2440 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2441 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2442 chip->write_buf(mtd, p, eccsize);
2443 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2444 }
2445
2446 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2447 chip->ecc.total);
2448 if (ret)
2449 return ret;
2450
2451 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2452
2453 return 0;
2454 }
2455
2456
2457 /**
2458 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2459 * @mtd: mtd info structure
2460 * @chip: nand chip info structure
2461 * @offset: column address of subpage within the page
2462 * @data_len: data length
2463 * @buf: data buffer
2464 * @oob_required: must write chip->oob_poi to OOB
2465 * @page: page number to write
2466 */
2467 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2468 struct nand_chip *chip, uint32_t offset,
2469 uint32_t data_len, const uint8_t *buf,
2470 int oob_required, int page)
2471 {
2472 uint8_t *oob_buf = chip->oob_poi;
2473 uint8_t *ecc_calc = chip->buffers->ecccalc;
2474 int ecc_size = chip->ecc.size;
2475 int ecc_bytes = chip->ecc.bytes;
2476 int ecc_steps = chip->ecc.steps;
2477 uint32_t start_step = offset / ecc_size;
2478 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2479 int oob_bytes = mtd->oobsize / ecc_steps;
2480 int step, ret;
2481
2482 for (step = 0; step < ecc_steps; step++) {
2483 /* configure controller for WRITE access */
2484 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2485
2486 /* write data (untouched subpages already masked by 0xFF) */
2487 chip->write_buf(mtd, buf, ecc_size);
2488
2489 /* mask ECC of un-touched subpages by padding 0xFF */
2490 if ((step < start_step) || (step > end_step))
2491 memset(ecc_calc, 0xff, ecc_bytes);
2492 else
2493 chip->ecc.calculate(mtd, buf, ecc_calc);
2494
2495 /* mask OOB of un-touched subpages by padding 0xFF */
2496 /* if oob_required, preserve OOB metadata of written subpage */
2497 if (!oob_required || (step < start_step) || (step > end_step))
2498 memset(oob_buf, 0xff, oob_bytes);
2499
2500 buf += ecc_size;
2501 ecc_calc += ecc_bytes;
2502 oob_buf += oob_bytes;
2503 }
2504
2505 /* copy calculated ECC for the whole page to chip->oob_poi */
2506 /* this includes the masked value (0xFF) for unwritten subpages */
2507 ecc_calc = chip->buffers->ecccalc;
2508 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2509 chip->ecc.total);
2510 if (ret)
2511 return ret;
2512
2513 /* write OOB buffer to NAND device */
2514 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2515
2516 return 0;
2517 }
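
/*
 * Worked example with hypothetical geometry: writesize = 2048 and
 * ecc.size = 512 give ecc.steps = 4.  A subpage write with offset = 1024
 * and data_len = 512 yields start_step = end_step = 2, so only step 2 gets
 * real ECC; steps 0, 1 and 3 have their ECC bytes (and, unless
 * oob_required, their OOB chunk) padded with 0xFF and are left untouched
 * on the flash.
 */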
2518
2519
2520 /**
2521 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2522 * @mtd: mtd info structure
2523 * @chip: nand chip info structure
2524 * @buf: data buffer
2525 * @oob_required: must write chip->oob_poi to OOB
2526 * @page: page number to write
2527 *
2528 * The hw generator calculates the error syndrome automatically. Therefore we
2529 * need a special oob layout and handling.
2530 */
2531 static int nand_write_page_syndrome(struct mtd_info *mtd,
2532 struct nand_chip *chip,
2533 const uint8_t *buf, int oob_required,
2534 int page)
2535 {
2536 int i, eccsize = chip->ecc.size;
2537 int eccbytes = chip->ecc.bytes;
2538 int eccsteps = chip->ecc.steps;
2539 const uint8_t *p = buf;
2540 uint8_t *oob = chip->oob_poi;
2541
2542 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2543
2544 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2545 chip->write_buf(mtd, p, eccsize);
2546
2547 if (chip->ecc.prepad) {
2548 chip->write_buf(mtd, oob, chip->ecc.prepad);
2549 oob += chip->ecc.prepad;
2550 }
2551
2552 chip->ecc.calculate(mtd, p, oob);
2553 chip->write_buf(mtd, oob, eccbytes);
2554 oob += eccbytes;
2555
2556 if (chip->ecc.postpad) {
2557 chip->write_buf(mtd, oob, chip->ecc.postpad);
2558 oob += chip->ecc.postpad;
2559 }
2560 }
2561
2562 /* Calculate remaining oob bytes */
2563 i = mtd->oobsize - (oob - chip->oob_poi);
2564 if (i)
2565 chip->write_buf(mtd, oob, i);
2566
2567 return 0;
2568 }
2569
2570 /**
2571 * nand_write_page - write one page
2572 * @mtd: MTD device structure
2573 * @chip: NAND chip descriptor
2574 * @offset: address offset within the page
2575 * @data_len: length of actual data to be written
2576 * @buf: the data to write
2577 * @oob_required: must write chip->oob_poi to OOB
2578 * @page: page number to write
2579 * @raw: use _raw version of write_page
2580 */
2581 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2582 uint32_t offset, int data_len, const uint8_t *buf,
2583 int oob_required, int page, int raw)
2584 {
2585 int status, subpage;
2586
2587 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2588 chip->ecc.write_subpage)
2589 subpage = offset || (data_len < mtd->writesize);
2590 else
2591 subpage = 0;
2592
2593 if (nand_standard_page_accessors(&chip->ecc))
2594 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2595
2596 if (unlikely(raw))
2597 status = chip->ecc.write_page_raw(mtd, chip, buf,
2598 oob_required, page);
2599 else if (subpage)
2600 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2601 buf, oob_required, page);
2602 else
2603 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2604 page);
2605
2606 if (status < 0)
2607 return status;
2608
2609 if (nand_standard_page_accessors(&chip->ecc)) {
2610 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2611
2612 status = chip->waitfunc(mtd, chip);
2613 if (status & NAND_STATUS_FAIL)
2614 return -EIO;
2615 }
2616
2617 return 0;
2618 }
2619
2620 /**
2621 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2622 * @mtd: MTD device structure
2623 * @oob: oob data buffer
2624 * @len: oob data write length
2625 * @ops: oob ops structure
2626 */
2627 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2628 struct mtd_oob_ops *ops)
2629 {
2630 struct nand_chip *chip = mtd_to_nand(mtd);
2631 int ret;
2632
2633 /*
2634 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2635 * data from a previous OOB read.
2636 */
2637 memset(chip->oob_poi, 0xff, mtd->oobsize);
2638
2639 switch (ops->mode) {
2640
2641 case MTD_OPS_PLACE_OOB:
2642 case MTD_OPS_RAW:
2643 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2644 return oob + len;
2645
2646 case MTD_OPS_AUTO_OOB:
2647 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2648 ops->ooboffs, len);
2649 BUG_ON(ret);
2650 return oob + len;
2651
2652 default:
2653 BUG();
2654 }
2655 return NULL;
2656 }
2657
2658 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
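
/*
 * Example (hypothetical subpagesize of 512): a write to offset 0x20200 of
 * length 0x400 passes the check below, while offset 0x20201 or length 300
 * makes NOTALIGNED() true and nand_do_write_ops() returns -EINVAL.
 */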
2659
2660 /**
2661 * nand_do_write_ops - [INTERN] NAND write with ECC
2662 * @mtd: MTD device structure
2663 * @to: offset to write to
2664 * @ops: oob operations description structure
2665 *
2666 * NAND write with ECC.
2667 */
2668 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2669 struct mtd_oob_ops *ops)
2670 {
2671 int chipnr, realpage, page, blockmask, column;
2672 struct nand_chip *chip = mtd_to_nand(mtd);
2673 uint32_t writelen = ops->len;
2674
2675 uint32_t oobwritelen = ops->ooblen;
2676 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2677
2678 uint8_t *oob = ops->oobbuf;
2679 uint8_t *buf = ops->datbuf;
2680 int ret;
2681 int oob_required = oob ? 1 : 0;
2682
2683 ops->retlen = 0;
2684 if (!writelen)
2685 return 0;
2686
2687 /* Reject writes which are not page aligned */
2688 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2689 pr_notice("%s: attempt to write non page aligned data\n",
2690 __func__);
2691 return -EINVAL;
2692 }
2693
2694 column = to & (mtd->writesize - 1);
2695
2696 chipnr = (int)(to >> chip->chip_shift);
2697 chip->select_chip(mtd, chipnr);
2698
2699 /* Check, if it is write protected */
2700 if (nand_check_wp(mtd)) {
2701 ret = -EIO;
2702 goto err_out;
2703 }
2704
2705 realpage = (int)(to >> chip->page_shift);
2706 page = realpage & chip->pagemask;
2707 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2708
2709 /* Invalidate the page cache, when we write to the cached page */
2710 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2711 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2712 chip->pagebuf = -1;
2713
2714 /* Don't allow multipage oob writes with offset */
2715 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2716 ret = -EINVAL;
2717 goto err_out;
2718 }
2719
2720 while (1) {
2721 int bytes = mtd->writesize;
2722 uint8_t *wbuf = buf;
2723 int use_bufpoi;
2724 int part_pagewr = (column || writelen < mtd->writesize);
2725
2726 if (part_pagewr)
2727 use_bufpoi = 1;
2728 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2729 use_bufpoi = !virt_addr_valid(buf) ||
2730 !IS_ALIGNED((unsigned long)buf,
2731 chip->buf_align);
2732 else
2733 use_bufpoi = 0;
2734
2735 /* Partial page write, or need to use bounce buffer? */
2736 if (use_bufpoi) {
2737 pr_debug("%s: using write bounce buffer for buf@%p\n",
2738 __func__, buf);
2739 if (part_pagewr)
2740 bytes = min_t(int, bytes - column, writelen);
2741 chip->pagebuf = -1;
2742 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2743 memcpy(&chip->buffers->databuf[column], buf, bytes);
2744 wbuf = chip->buffers->databuf;
2745 }
2746
2747 if (unlikely(oob)) {
2748 size_t len = min(oobwritelen, oobmaxlen);
2749 oob = nand_fill_oob(mtd, oob, len, ops);
2750 oobwritelen -= len;
2751 } else {
2752 /* We still need to erase leftover OOB data */
2753 memset(chip->oob_poi, 0xff, mtd->oobsize);
2754 }
2755
2756 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2757 oob_required, page,
2758 (ops->mode == MTD_OPS_RAW));
2759 if (ret)
2760 break;
2761
2762 writelen -= bytes;
2763 if (!writelen)
2764 break;
2765
2766 column = 0;
2767 buf += bytes;
2768 realpage++;
2769
2770 page = realpage & chip->pagemask;
2771 /* Check, if we cross a chip boundary */
2772 if (!page) {
2773 chipnr++;
2774 chip->select_chip(mtd, -1);
2775 chip->select_chip(mtd, chipnr);
2776 }
2777 }
2778
2779 ops->retlen = ops->len - writelen;
2780 if (unlikely(oob))
2781 ops->oobretlen = ops->ooblen;
2782
2783 err_out:
2784 chip->select_chip(mtd, -1);
2785 return ret;
2786 }
2787
2788 /**
2789 * panic_nand_write - [MTD Interface] NAND write with ECC
2790 * @mtd: MTD device structure
2791 * @to: offset to write to
2792 * @len: number of bytes to write
2793 * @retlen: pointer to variable to store the number of written bytes
2794 * @buf: the data to write
2795 *
2796 * NAND write with ECC. Used when performing writes in interrupt context, this
2797 * may for example be called by mtdoops when writing an oops while in panic.
2798 */
2799 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2800 size_t *retlen, const uint8_t *buf)
2801 {
2802 struct nand_chip *chip = mtd_to_nand(mtd);
2803 struct mtd_oob_ops ops;
2804 int ret;
2805
2806 /* Wait for the device to get ready */
2807 panic_nand_wait(mtd, chip, 400);
2808
2809 /* Grab the device */
2810 panic_nand_get_device(chip, mtd, FL_WRITING);
2811
2812 memset(&ops, 0, sizeof(ops));
2813 ops.len = len;
2814 ops.datbuf = (uint8_t *)buf;
2815 ops.mode = MTD_OPS_PLACE_OOB;
2816
2817 ret = nand_do_write_ops(mtd, to, &ops);
2818
2819 *retlen = ops.retlen;
2820 return ret;
2821 }
2822
2823 /**
2824 * nand_write - [MTD Interface] NAND write with ECC
2825 * @mtd: MTD device structure
2826 * @to: offset to write to
2827 * @len: number of bytes to write
2828 * @retlen: pointer to variable to store the number of written bytes
2829 * @buf: the data to write
2830 *
2831 * NAND write with ECC.
2832 */
2833 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2834 size_t *retlen, const uint8_t *buf)
2835 {
2836 struct mtd_oob_ops ops;
2837 int ret;
2838
2839 nand_get_device(mtd, FL_WRITING);
2840 memset(&ops, 0, sizeof(ops));
2841 ops.len = len;
2842 ops.datbuf = (uint8_t *)buf;
2843 ops.mode = MTD_OPS_PLACE_OOB;
2844 ret = nand_do_write_ops(mtd, to, &ops);
2845 *retlen = ops.retlen;
2846 nand_release_device(mtd);
2847 return ret;
2848 }
2849
2850 /**
2851 * nand_do_write_oob - [INTERN] NAND write out-of-band
2852 * @mtd: MTD device structure
2853 * @to: offset to write to
2854 * @ops: oob operation description structure
2855 *
2856 * NAND write out-of-band.
2857 */
2858 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2859 struct mtd_oob_ops *ops)
2860 {
2861 int chipnr, page, status, len;
2862 struct nand_chip *chip = mtd_to_nand(mtd);
2863
2864 pr_debug("%s: to = 0x%08x, len = %i\n",
2865 __func__, (unsigned int)to, (int)ops->ooblen);
2866
2867 len = mtd_oobavail(mtd, ops);
2868
2869 /* Do not allow write past end of page */
2870 if ((ops->ooboffs + ops->ooblen) > len) {
2871 pr_debug("%s: attempt to write past end of page\n",
2872 __func__);
2873 return -EINVAL;
2874 }
2875
2876 if (unlikely(ops->ooboffs >= len)) {
2877 pr_debug("%s: attempt to start write outside oob\n",
2878 __func__);
2879 return -EINVAL;
2880 }
2881
2882 /* Do not allow write past end of device */
2883 if (unlikely(to >= mtd->size ||
2884 ops->ooboffs + ops->ooblen >
2885 ((mtd->size >> chip->page_shift) -
2886 (to >> chip->page_shift)) * len)) {
2887 pr_debug("%s: attempt to write beyond end of device\n",
2888 __func__);
2889 return -EINVAL;
2890 }
2891
2892 chipnr = (int)(to >> chip->chip_shift);
2893
2894 /*
2895 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2896 * of my DiskOnChip 2000 test units) will clear the whole data page too
2897 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2898 * it in the doc2000 driver in August 1999. dwmw2.
2899 */
2900 nand_reset(chip, chipnr);
2901
2902 chip->select_chip(mtd, chipnr);
2903
2904 /* Shift to get page */
2905 page = (int)(to >> chip->page_shift);
2906
2907 /* Check, if it is write protected */
2908 if (nand_check_wp(mtd)) {
2909 chip->select_chip(mtd, -1);
2910 return -EROFS;
2911 }
2912
2913 /* Invalidate the page cache, if we write to the cached page */
2914 if (page == chip->pagebuf)
2915 chip->pagebuf = -1;
2916
2917 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2918
2919 if (ops->mode == MTD_OPS_RAW)
2920 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2921 else
2922 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2923
2924 chip->select_chip(mtd, -1);
2925
2926 if (status)
2927 return status;
2928
2929 ops->oobretlen = ops->ooblen;
2930
2931 return 0;
2932 }
2933
2934 /**
2935 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2936 * @mtd: MTD device structure
2937 * @to: offset to write to
2938 * @ops: oob operation description structure
2939 */
2940 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2941 struct mtd_oob_ops *ops)
2942 {
2943 int ret = -ENOTSUPP;
2944
2945 ops->retlen = 0;
2946
2947 /* Do not allow writes past end of device */
2948 if (ops->datbuf && (to + ops->len) > mtd->size) {
2949 pr_debug("%s: attempt to write beyond end of device\n",
2950 __func__);
2951 return -EINVAL;
2952 }
2953
2954 nand_get_device(mtd, FL_WRITING);
2955
2956 switch (ops->mode) {
2957 case MTD_OPS_PLACE_OOB:
2958 case MTD_OPS_AUTO_OOB:
2959 case MTD_OPS_RAW:
2960 break;
2961
2962 default:
2963 goto out;
2964 }
2965
2966 if (!ops->datbuf)
2967 ret = nand_do_write_oob(mtd, to, ops);
2968 else
2969 ret = nand_do_write_ops(mtd, to, ops);
2970
2971 out:
2972 nand_release_device(mtd);
2973 return ret;
2974 }
2975
2976 /**
2977 * single_erase - [GENERIC] NAND standard block erase command function
2978 * @mtd: MTD device structure
2979 * @page: the page address of the block which will be erased
2980 *
2981 * Standard erase command for NAND chips. Returns NAND status.
2982 */
2983 static int single_erase(struct mtd_info *mtd, int page)
2984 {
2985 struct nand_chip *chip = mtd_to_nand(mtd);
2986 /* Send commands to erase a block */
2987 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2988 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2989
2990 return chip->waitfunc(mtd, chip);
2991 }
2992
2993 /**
2994 * nand_erase - [MTD Interface] erase block(s)
2995 * @mtd: MTD device structure
2996 * @instr: erase instruction
2997 *
2998 * Erase one or more blocks.
2999 */
3000 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3001 {
3002 return nand_erase_nand(mtd, instr, 0);
3003 }
3004
3005 /**
3006 * nand_erase_nand - [INTERN] erase block(s)
3007 * @mtd: MTD device structure
3008 * @instr: erase instruction
3009 * @allowbbt: allow erasing the bbt area
3010 *
3011 * Erase one or more blocks.
3012 */
3013 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3014 int allowbbt)
3015 {
3016 int page, status, pages_per_block, ret, chipnr;
3017 struct nand_chip *chip = mtd_to_nand(mtd);
3018 loff_t len;
3019
3020 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3021 __func__, (unsigned long long)instr->addr,
3022 (unsigned long long)instr->len);
3023
3024 if (check_offs_len(mtd, instr->addr, instr->len))
3025 return -EINVAL;
3026
3027 /* Grab the lock and see if the device is available */
3028 nand_get_device(mtd, FL_ERASING);
3029
3030 /* Shift to get first page */
3031 page = (int)(instr->addr >> chip->page_shift);
3032 chipnr = (int)(instr->addr >> chip->chip_shift);
3033
3034 /* Calculate pages in each block */
3035 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3036
3037 /* Select the NAND device */
3038 chip->select_chip(mtd, chipnr);
3039
3040 /* Check, if it is write protected */
3041 if (nand_check_wp(mtd)) {
3042 pr_debug("%s: device is write protected!\n",
3043 __func__);
3044 instr->state = MTD_ERASE_FAILED;
3045 goto erase_exit;
3046 }
3047
3048 /* Loop through the pages */
3049 len = instr->len;
3050
3051 instr->state = MTD_ERASING;
3052
3053 while (len) {
3054 /* Check if we have a bad block, we do not erase bad blocks! */
3055 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3056 chip->page_shift, allowbbt)) {
3057 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3058 __func__, page);
3059 instr->state = MTD_ERASE_FAILED;
3060 goto erase_exit;
3061 }
3062
3063 /*
3064 * Invalidate the page cache, if we erase the block which
3065 * contains the current cached page.
3066 */
3067 if (page <= chip->pagebuf && chip->pagebuf <
3068 (page + pages_per_block))
3069 chip->pagebuf = -1;
3070
3071 status = chip->erase(mtd, page & chip->pagemask);
3072
3073 /* See if block erase succeeded */
3074 if (status & NAND_STATUS_FAIL) {
3075 pr_debug("%s: failed erase, page 0x%08x\n",
3076 __func__, page);
3077 instr->state = MTD_ERASE_FAILED;
3078 instr->fail_addr =
3079 ((loff_t)page << chip->page_shift);
3080 goto erase_exit;
3081 }
3082
3083 /* Increment page address and decrement length */
3084 len -= (1ULL << chip->phys_erase_shift);
3085 page += pages_per_block;
3086
3087 /* Check, if we cross a chip boundary */
3088 if (len && !(page & chip->pagemask)) {
3089 chipnr++;
3090 chip->select_chip(mtd, -1);
3091 chip->select_chip(mtd, chipnr);
3092 }
3093 }
3094 instr->state = MTD_ERASE_DONE;
3095
3096 erase_exit:
3097
3098 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3099
3100 /* Deselect and wake up anyone waiting on the device */
3101 chip->select_chip(mtd, -1);
3102 nand_release_device(mtd);
3103
3104 /* Invoke the callback function */
3105 if (!ret)
3106 mtd_erase_callback(instr);
3107
3108 /* Return more or less happy */
3109 return ret;
3110 }
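
/*
 * Caller-side sketch (values hypothetical): erasing a single block through
 * the MTD interface, which ends up in nand_erase() above.
 *
 *	struct erase_info ei = { };
 *
 *	ei.mtd  = mtd;
 *	ei.addr = block_ofs;		must be erase-block aligned
 *	ei.len  = mtd->erasesize;
 *	ret = mtd_erase(mtd, &ei);
 */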
3111
3112 /**
3113 * nand_sync - [MTD Interface] sync
3114 * @mtd: MTD device structure
3115 *
3116 * Sync is actually just a wait for the chip to become ready.
3117 */
3118 static void nand_sync(struct mtd_info *mtd)
3119 {
3120 pr_debug("%s: called\n", __func__);
3121
3122 /* Grab the lock and see if the device is available */
3123 nand_get_device(mtd, FL_SYNCING);
3124 /* Release it and go back */
3125 nand_release_device(mtd);
3126 }
3127
3128 /**
3129 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3130 * @mtd: MTD device structure
3131 * @offs: offset relative to mtd start
3132 */
3133 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3134 {
3135 struct nand_chip *chip = mtd_to_nand(mtd);
3136 int chipnr = (int)(offs >> chip->chip_shift);
3137 int ret;
3138
3139 /* Select the NAND device */
3140 nand_get_device(mtd, FL_READING);
3141 chip->select_chip(mtd, chipnr);
3142
3143 ret = nand_block_checkbad(mtd, offs, 0);
3144
3145 chip->select_chip(mtd, -1);
3146 nand_release_device(mtd);
3147
3148 return ret;
3149 }
3150
3151 /**
3152 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3153 * @mtd: MTD device structure
3154 * @ofs: offset relative to mtd start
3155 */
3156 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3157 {
3158 int ret;
3159
3160 ret = nand_block_isbad(mtd, ofs);
3161 if (ret) {
3162 /* If it was bad already, return success and do nothing */
3163 if (ret > 0)
3164 return 0;
3165 return ret;
3166 }
3167
3168 return nand_block_markbad_lowlevel(mtd, ofs);
3169 }
3170
3171 /**
3172 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3173 * @mtd: MTD device structure
3174 * @ofs: offset relative to mtd start
3175 * @len: length of mtd
3176 */
3177 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3178 {
3179 struct nand_chip *chip = mtd_to_nand(mtd);
3180 u32 part_start_block;
3181 u32 part_end_block;
3182 u32 part_start_die;
3183 u32 part_end_die;
3184
3185 /*
3186 * max_bb_per_die and blocks_per_die are used to determine
3187 * the maximum bad block count.
3188 */
3189 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3190 return -ENOTSUPP;
3191
3192 /* Get the start and end of the partition in erase blocks. */
3193 part_start_block = mtd_div_by_eb(ofs, mtd);
3194 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3195
3196 /* Get the start and end LUNs of the partition. */
3197 part_start_die = part_start_block / chip->blocks_per_die;
3198 part_end_die = part_end_block / chip->blocks_per_die;
3199
3200 /*
3201 * Look up the bad blocks per unit and multiply by the number of units
3202 * that the partition spans.
3203 */
3204 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3205 }
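
/*
 * Worked example with made-up numbers: 128KiB erase blocks,
 * blocks_per_die = 1024, max_bb_per_die = 20.  A partition with ofs = 0
 * and len = 256MiB covers erase blocks 0..2047, i.e. LUNs 0 and 1, so the
 * function reports 20 * 2 = 40 as the worst-case bad block count.
 */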
3206
3207 /**
3208 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3209 * @mtd: MTD device structure
3210 * @chip: nand chip info structure
3211 * @addr: feature address.
3212 * @subfeature_param: the subfeature parameters, a four bytes array.
3213 */
3214 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3215 int addr, uint8_t *subfeature_param)
3216 {
3217 int status;
3218 int i;
3219
3220 if (!chip->onfi_version ||
3221 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3222 & ONFI_OPT_CMD_SET_GET_FEATURES))
3223 return -EINVAL;
3224
3225 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3226 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3227 chip->write_byte(mtd, subfeature_param[i]);
3228
3229 status = chip->waitfunc(mtd, chip);
3230 if (status & NAND_STATUS_FAIL)
3231 return -EIO;
3232 return 0;
3233 }
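
/*
 * Illustrative sketch of a SET FEATURES call; the feature address is the
 * ONFI timing-mode feature and the mode value is just an example:
 *
 *	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { 3, };	timing mode 3
 *
 *	ret = chip->onfi_set_features(mtd, chip,
 *				      ONFI_FEATURE_ADDR_TIMING_MODE, param);
 */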
3234
3235 /**
3236 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3237 * @mtd: MTD device structure
3238 * @chip: nand chip info structure
3239 * @addr: feature address.
3240 * @subfeature_param: the subfeature parameters, a four bytes array.
3241 */
3242 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3243 int addr, uint8_t *subfeature_param)
3244 {
3245 int i;
3246
3247 if (!chip->onfi_version ||
3248 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3249 & ONFI_OPT_CMD_SET_GET_FEATURES))
3250 return -EINVAL;
3251
3252 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3253 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3254 *subfeature_param++ = chip->read_byte(mtd);
3255 return 0;
3256 }
3257
3258 /**
3259 * nand_onfi_get_set_features_notsupp - set/get features stub returning
3260 * -ENOTSUPP
3261 * @mtd: MTD device structure
3262 * @chip: nand chip info structure
3263 * @addr: feature address.
3264 * @subfeature_param: the subfeature parameters, a four bytes array.
3265 *
3266 * Should be used by NAND controller drivers that do not support the SET/GET
3267 * FEATURES operations.
3268 */
3269 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
3270 struct nand_chip *chip, int addr,
3271 u8 *subfeature_param)
3272 {
3273 return -ENOTSUPP;
3274 }
3275 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
3276
3277 /**
3278 * nand_suspend - [MTD Interface] Suspend the NAND flash
3279 * @mtd: MTD device structure
3280 */
3281 static int nand_suspend(struct mtd_info *mtd)
3282 {
3283 return nand_get_device(mtd, FL_PM_SUSPENDED);
3284 }
3285
3286 /**
3287 * nand_resume - [MTD Interface] Resume the NAND flash
3288 * @mtd: MTD device structure
3289 */
3290 static void nand_resume(struct mtd_info *mtd)
3291 {
3292 struct nand_chip *chip = mtd_to_nand(mtd);
3293
3294 if (chip->state == FL_PM_SUSPENDED)
3295 nand_release_device(mtd);
3296 else
3297 pr_err("%s called for a chip which is not in suspended state\n",
3298 __func__);
3299 }
3300
3301 /**
3302 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3303 * prevent further operations
3304 * @mtd: MTD device structure
3305 */
3306 static void nand_shutdown(struct mtd_info *mtd)
3307 {
3308 nand_get_device(mtd, FL_PM_SUSPENDED);
3309 }
3310
3311 /* Set default functions */
3312 static void nand_set_defaults(struct nand_chip *chip)
3313 {
3314 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3315
3316 /* check for proper chip_delay setup, set 20us if not */
3317 if (!chip->chip_delay)
3318 chip->chip_delay = 20;
3319
3320 /* check if a user-supplied command function was given */
3321 if (chip->cmdfunc == NULL)
3322 chip->cmdfunc = nand_command;
3323
3324 /* check if a user-supplied wait function was given */
3325 if (chip->waitfunc == NULL)
3326 chip->waitfunc = nand_wait;
3327
3328 if (!chip->select_chip)
3329 chip->select_chip = nand_select_chip;
3330
3331 /* set for ONFI nand */
3332 if (!chip->onfi_set_features)
3333 chip->onfi_set_features = nand_onfi_set_features;
3334 if (!chip->onfi_get_features)
3335 chip->onfi_get_features = nand_onfi_get_features;
3336
3337 /* If called twice, pointers that depend on busw may need to be reset */
3338 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3339 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3340 if (!chip->read_word)
3341 chip->read_word = nand_read_word;
3342 if (!chip->block_bad)
3343 chip->block_bad = nand_block_bad;
3344 if (!chip->block_markbad)
3345 chip->block_markbad = nand_default_block_markbad;
3346 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3347 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3348 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3349 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3350 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3351 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3352 if (!chip->scan_bbt)
3353 chip->scan_bbt = nand_default_bbt;
3354
3355 if (!chip->controller) {
3356 chip->controller = &chip->hwcontrol;
3357 nand_hw_control_init(chip->controller);
3358 }
3359
3360 if (!chip->buf_align)
3361 chip->buf_align = 1;
3362 }
3363
3364 /* Sanitize ONFI strings so we can safely print them */
3365 static void sanitize_string(uint8_t *s, size_t len)
3366 {
3367 ssize_t i;
3368
3369 /* Null terminate */
3370 s[len - 1] = 0;
3371
3372 /* Remove non printable chars */
3373 for (i = 0; i < len - 1; i++) {
3374 if (s[i] < ' ' || s[i] > 127)
3375 s[i] = '?';
3376 }
3377
3378 /* Remove trailing spaces */
3379 strim(s);
3380 }
3381
3382 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3383 {
3384 int i;
3385 while (len--) {
3386 crc ^= *p++ << 8;
3387 for (i = 0; i < 8; i++)
3388 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3389 }
3390
3391 return crc;
3392 }
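
/*
 * This is the ONFI/JEDEC parameter-page CRC: a CRC-16 over the raw bytes,
 * MSB first, polynomial 0x8005 (x^16 + x^15 + x^2 + 1), seeded with
 * ONFI_CRC_BASE.  The detection code below runs it over the first 254
 * (ONFI) or 510 (JEDEC) bytes of a parameter page copy and compares the
 * result with the little-endian CRC stored in the page:
 *
 *	onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == le16_to_cpu(p->crc)
 */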
3393
3394 /* Parse the Extended Parameter Page. */
3395 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3396 struct nand_onfi_params *p)
3397 {
3398 struct mtd_info *mtd = nand_to_mtd(chip);
3399 struct onfi_ext_param_page *ep;
3400 struct onfi_ext_section *s;
3401 struct onfi_ext_ecc_info *ecc;
3402 uint8_t *cursor;
3403 int ret = -EINVAL;
3404 int len;
3405 int i;
3406
3407 len = le16_to_cpu(p->ext_param_page_length) * 16;
3408 ep = kmalloc(len, GFP_KERNEL);
3409 if (!ep)
3410 return -ENOMEM;
3411
3412 /* Send our own NAND_CMD_PARAM. */
3413 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3414
3415 /* Use the Change Read Column command to skip the ONFI param pages. */
3416 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3417 sizeof(*p) * p->num_of_param_pages, -1);
3418
3419 /* Read out the Extended Parameter Page. */
3420 chip->read_buf(mtd, (uint8_t *)ep, len);
3421 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3422 != le16_to_cpu(ep->crc))) {
3423 pr_debug("fail in the CRC.\n");
3424 goto ext_out;
3425 }
3426
3427 /*
3428 * Check the signature.
3429 * We do not strictly follow the ONFI spec here; this may change in the future.
3430 */
3431 if (strncmp(ep->sig, "EPPS", 4)) {
3432 pr_debug("The signature is invalid.\n");
3433 goto ext_out;
3434 }
3435
3436 /* find the ECC section. */
3437 cursor = (uint8_t *)(ep + 1);
3438 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3439 s = ep->sections + i;
3440 if (s->type == ONFI_SECTION_TYPE_2)
3441 break;
3442 cursor += s->length * 16;
3443 }
3444 if (i == ONFI_EXT_SECTION_MAX) {
3445 pr_debug("We can not find the ECC section.\n");
3446 goto ext_out;
3447 }
3448
3449 /* get the info we want. */
3450 ecc = (struct onfi_ext_ecc_info *)cursor;
3451
3452 if (!ecc->codeword_size) {
3453 pr_debug("Invalid codeword size\n");
3454 goto ext_out;
3455 }
3456
3457 chip->ecc_strength_ds = ecc->ecc_bits;
3458 chip->ecc_step_ds = 1 << ecc->codeword_size;
3459 ret = 0;
3460
3461 ext_out:
3462 kfree(ep);
3463 return ret;
3464 }
3465
3466 /*
3467 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3468 */
3469 static int nand_flash_detect_onfi(struct nand_chip *chip)
3470 {
3471 struct mtd_info *mtd = nand_to_mtd(chip);
3472 struct nand_onfi_params *p = &chip->onfi_params;
3473 int i, j;
3474 int val;
3475
3476 /* Try ONFI for unknown chip or LP */
3477 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3478 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3479 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3480 return 0;
3481
3482 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3483 for (i = 0; i < 3; i++) {
3484 for (j = 0; j < sizeof(*p); j++)
3485 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3486 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3487 le16_to_cpu(p->crc)) {
3488 break;
3489 }
3490 }
3491
3492 if (i == 3) {
3493 pr_err("Could not find valid ONFI parameter page; aborting\n");
3494 return 0;
3495 }
3496
3497 /* Check version */
3498 val = le16_to_cpu(p->revision);
3499 if (val & (1 << 5))
3500 chip->onfi_version = 23;
3501 else if (val & (1 << 4))
3502 chip->onfi_version = 22;
3503 else if (val & (1 << 3))
3504 chip->onfi_version = 21;
3505 else if (val & (1 << 2))
3506 chip->onfi_version = 20;
3507 else if (val & (1 << 1))
3508 chip->onfi_version = 10;
3509
3510 if (!chip->onfi_version) {
3511 pr_info("unsupported ONFI version: %d\n", val);
3512 return 0;
3513 }
3514
3515 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3516 sanitize_string(p->model, sizeof(p->model));
3517 if (!mtd->name)
3518 mtd->name = p->model;
3519
3520 mtd->writesize = le32_to_cpu(p->byte_per_page);
3521
3522 /*
3523 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3524 * (don't ask me who thought of this...). MTD assumes that these
3525 * dimensions will be power-of-2, so just truncate the remaining area.
3526 */
3527 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3528 mtd->erasesize *= mtd->writesize;
3529
3530 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3531
3532 /* See erasesize comment */
3533 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3534 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3535 chip->bits_per_cell = p->bits_per_cell;
3536
3537 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3538 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3539
3540 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3541 chip->options |= NAND_BUSWIDTH_16;
3542
3543 if (p->ecc_bits != 0xff) {
3544 chip->ecc_strength_ds = p->ecc_bits;
3545 chip->ecc_step_ds = 512;
3546 } else if (chip->onfi_version >= 21 &&
3547 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3548
3549 /*
3550 * nand_flash_detect_ext_param_page() uses the Change Read Column
3551 * command, which may not be supported by the current chip->cmdfunc,
3552 * so try to update chip->cmdfunc now. We do not replace a
3553 * user-supplied command function.
3554 */
3555 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3556 chip->cmdfunc = nand_command_lp;
3557
3558 /* The Extended Parameter Page is supported since ONFI 2.1. */
3559 if (nand_flash_detect_ext_param_page(chip, p))
3560 pr_warn("Failed to detect ONFI extended param page\n");
3561 } else {
3562 pr_warn("Could not retrieve ONFI ECC requirements\n");
3563 }
3564
3565 return 1;
3566 }
3567
3568 /*
3569 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3570 */
3571 static int nand_flash_detect_jedec(struct nand_chip *chip)
3572 {
3573 struct mtd_info *mtd = nand_to_mtd(chip);
3574 struct nand_jedec_params *p = &chip->jedec_params;
3575 struct jedec_ecc_info *ecc;
3576 int val;
3577 int i, j;
3578
3579 /* Try JEDEC for unknown chip or LP */
3580 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3581 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3582 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3583 chip->read_byte(mtd) != 'C')
3584 return 0;
3585
3586 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3587 for (i = 0; i < 3; i++) {
3588 for (j = 0; j < sizeof(*p); j++)
3589 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3590
3591 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3592 le16_to_cpu(p->crc))
3593 break;
3594 }
3595
3596 if (i == 3) {
3597 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3598 return 0;
3599 }
3600
3601 /* Check version */
3602 val = le16_to_cpu(p->revision);
3603 if (val & (1 << 2))
3604 chip->jedec_version = 10;
3605 else if (val & (1 << 1))
3606 chip->jedec_version = 1; /* vendor specific version */
3607
3608 if (!chip->jedec_version) {
3609 pr_info("unsupported JEDEC version: %d\n", val);
3610 return 0;
3611 }
3612
3613 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3614 sanitize_string(p->model, sizeof(p->model));
3615 if (!mtd->name)
3616 mtd->name = p->model;
3617
3618 mtd->writesize = le32_to_cpu(p->byte_per_page);
3619
3620 /* See the comment in nand_flash_detect_onfi(). */
3621 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3622 mtd->erasesize *= mtd->writesize;
3623
3624 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3625
3626 /* See the comment in nand_flash_detect_onfi(). */
3627 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3628 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3629 chip->bits_per_cell = p->bits_per_cell;
3630
3631 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3632 chip->options |= NAND_BUSWIDTH_16;
3633
3634 /* ECC info */
3635 ecc = &p->ecc_info[0];
3636
3637 if (ecc->codeword_size >= 9) {
3638 chip->ecc_strength_ds = ecc->ecc_bits;
3639 chip->ecc_step_ds = 1 << ecc->codeword_size;
3640 } else {
3641 pr_warn("Invalid codeword size\n");
3642 }
3643
3644 return 1;
3645 }
3646
3647 /*
3648 * nand_id_has_period - Check if an ID string has a given wraparound period
3649 * @id_data: the ID string
3650 * @arrlen: the length of the @id_data array
3651 * @period: the period of repetition
3652 *
3653 * Check if an ID string is repeated within a given sequence of bytes at a
3654 * specific repetition interval (e.g., {0x20,0x01,0x7F,0x20} has a
3655 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3656 * if the repetition has a period of @period; otherwise, returns zero.
3657 */
3658 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3659 {
3660 int i, j;
3661 for (i = 0; i < period; i++)
3662 for (j = i + period; j < arrlen; j += period)
3663 if (id_data[i] != id_data[j])
3664 return 0;
3665 return 1;
3666 }
3667
3668 /*
3669 * nand_id_len - Get the length of an ID string returned by CMD_READID
3670 * @id_data: the ID string
3671 * @arrlen: the length of the @id_data array
3672 *
3673 * Returns the length of the ID string, according to known wraparound/trailing
3674 * zero patterns. If no pattern exists, returns the length of the array.
3675 */
3676 static int nand_id_len(u8 *id_data, int arrlen)
3677 {
3678 int last_nonzero, period;
3679
3680 /* Find last non-zero byte */
3681 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3682 if (id_data[last_nonzero])
3683 break;
3684
3685 /* All zeros */
3686 if (last_nonzero < 0)
3687 return 0;
3688
3689 /* Calculate wraparound period */
3690 for (period = 1; period < arrlen; period++)
3691 if (nand_id_has_period(id_data, arrlen, period))
3692 break;
3693
3694 /* There's a repeated pattern */
3695 if (period < arrlen)
3696 return period;
3697
3698 /* There are trailing zeros */
3699 if (last_nonzero < arrlen - 1)
3700 return last_nonzero + 1;
3701
3702 /* No pattern detected */
3703 return arrlen;
3704 }
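
/*
 * Two hypothetical examples of the logic above:
 *
 *	{ 0x20, 0x01, 0x7f, 0x20, 0x01, 0x7f, 0x20, 0x01 }
 *		repeats with period 3, so nand_id_len() returns 3;
 *	{ 0x2c, 0xda, 0x90, 0x95, 0x06, 0x00, 0x00, 0x00 }
 *		has no wraparound but three trailing zeros, so it returns 5.
 */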
3705
3706 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
3707 static int nand_get_bits_per_cell(u8 cellinfo)
3708 {
3709 int bits;
3710
3711 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3712 bits >>= NAND_CI_CELLTYPE_SHIFT;
3713 return bits + 1;
3714 }
3715
3716 /*
3717 * Many newer NAND chips share similar device ID codes, which represent the size of the
3718 * chip. The rest of the parameters must be decoded according to generic or
3719 * manufacturer-specific "extended ID" decoding patterns.
3720 */
3721 void nand_decode_ext_id(struct nand_chip *chip)
3722 {
3723 struct mtd_info *mtd = nand_to_mtd(chip);
3724 int extid;
3725 u8 *id_data = chip->id.data;
3726 /* The 3rd id byte holds MLC / multichip data */
3727 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3728 /* The 4th id byte is the important one */
3729 extid = id_data[3];
3730
3731 /* Calc pagesize */
3732 mtd->writesize = 1024 << (extid & 0x03);
3733 extid >>= 2;
3734 /* Calc oobsize */
3735 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3736 extid >>= 2;
3737 /* Calc blocksize. Blocksize is multiples of 64KiB */
3738 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3739 extid >>= 2;
3740 /* Get buswidth information */
3741 if (extid & 0x1)
3742 chip->options |= NAND_BUSWIDTH_16;
3743 }
3744 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
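
/*
 * Worked example (hypothetical ID, for illustration only): a 4th ID byte
 * of 0x95 decodes as
 *
 *	writesize = 1024 << (0x95 & 0x03)	= 2048 bytes
 *	oobsize   = (8 << 1) * (2048 >> 9)	= 64 bytes
 *	erasesize = (64 * 1024) << 1		= 128 KiB
 *	bus width bit clear			= 8-bit bus
 */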
3745
3746 /*
3747 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3748 * decodes a matching ID table entry and assigns the MTD size parameters for
3749 * the chip.
3750 */
3751 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3752 {
3753 struct mtd_info *mtd = nand_to_mtd(chip);
3754
3755 mtd->erasesize = type->erasesize;
3756 mtd->writesize = type->pagesize;
3757 mtd->oobsize = mtd->writesize / 32;
3758
3759 /* All legacy ID NAND are small-page, SLC */
3760 chip->bits_per_cell = 1;
3761 }
3762
3763 /*
3764 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3765 * heuristic patterns using various detected parameters (e.g., manufacturer,
3766 * page size, cell-type information).
3767 */
3768 static void nand_decode_bbm_options(struct nand_chip *chip)
3769 {
3770 struct mtd_info *mtd = nand_to_mtd(chip);
3771
3772 /* Set the bad block position */
3773 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3774 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3775 else
3776 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3777 }
3778
3779 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3780 {
3781 return type->id_len;
3782 }
3783
3784 static bool find_full_id_nand(struct nand_chip *chip,
3785 struct nand_flash_dev *type)
3786 {
3787 struct mtd_info *mtd = nand_to_mtd(chip);
3788 u8 *id_data = chip->id.data;
3789
3790 if (!strncmp(type->id, id_data, type->id_len)) {
3791 mtd->writesize = type->pagesize;
3792 mtd->erasesize = type->erasesize;
3793 mtd->oobsize = type->oobsize;
3794
3795 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3796 chip->chipsize = (uint64_t)type->chipsize << 20;
3797 chip->options |= type->options;
3798 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3799 chip->ecc_step_ds = NAND_ECC_STEP(type);
3800 chip->onfi_timing_mode_default =
3801 type->onfi_timing_mode_default;
3802
3803 if (!mtd->name)
3804 mtd->name = type->name;
3805
3806 return true;
3807 }
3808 return false;
3809 }
3810
3811 /*
3812 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3813 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3814 * table.
3815 */
3816 static void nand_manufacturer_detect(struct nand_chip *chip)
3817 {
3818 /*
3819 * Try manufacturer detection if available and use
3820 * nand_decode_ext_id() otherwise.
3821 */
3822 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3823 chip->manufacturer.desc->ops->detect) {
3824 /* The 3rd id byte holds MLC / multichip data */
3825 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
3826 chip->manufacturer.desc->ops->detect(chip);
3827 } else {
3828 nand_decode_ext_id(chip);
3829 }
3830 }
3831
3832 /*
3833 * Manufacturer initialization. This function is called for all NANDs including
3834 * ONFI and JEDEC compliant ones.
3835 * Manufacturer drivers should put all their specific initialization code in
3836 * their ->init() hook.
3837 */
3838 static int nand_manufacturer_init(struct nand_chip *chip)
3839 {
3840 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3841 !chip->manufacturer.desc->ops->init)
3842 return 0;
3843
3844 return chip->manufacturer.desc->ops->init(chip);
3845 }
3846
3847 /*
3848 * Manufacturer cleanup. This function is called for all NANDs including
3849 * ONFI and JEDEC compliant ones.
3850 * Manufacturer drivers should put all their specific cleanup code in their
3851 * ->cleanup() hook.
3852 */
3853 static void nand_manufacturer_cleanup(struct nand_chip *chip)
3854 {
3855 /* Release manufacturer private data */
3856 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3857 chip->manufacturer.desc->ops->cleanup)
3858 chip->manufacturer.desc->ops->cleanup(chip);
3859 }
3860
3861 /*
3862 * Get the flash and manufacturer id and lookup if the type is supported.
3863 */
3864 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3865 {
3866 const struct nand_manufacturer *manufacturer;
3867 struct mtd_info *mtd = nand_to_mtd(chip);
3868 int busw;
3869 int i;
3870 u8 *id_data = chip->id.data;
3871 u8 maf_id, dev_id;
3872
3873 /*
3874 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3875 * after power-up.
3876 */
3877 nand_reset(chip, 0);
3878
3879 /* Select the device */
3880 chip->select_chip(mtd, 0);
3881
3882 /* Send the command for reading device ID */
3883 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3884
3885 /* Read manufacturer and device IDs */
3886 maf_id = chip->read_byte(mtd);
3887 dev_id = chip->read_byte(mtd);
3888
3889 /*
3890 * Try again to make sure, as on some systems bus-hold or other
3891 * interface concerns can cause random data that looks like a
3892 * possibly credible NAND flash to appear. If the two results do
3893 * not match, ignore the device completely.
3894 */
3895
3896 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3897
3898 /* Read entire ID string */
3899 for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
3900 id_data[i] = chip->read_byte(mtd);
3901
3902 if (id_data[0] != maf_id || id_data[1] != dev_id) {
3903 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
3904 maf_id, dev_id, id_data[0], id_data[1]);
3905 return -ENODEV;
3906 }
3907
3908 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
3909
3910 /* Try to identify manufacturer */
3911 manufacturer = nand_get_manufacturer(maf_id);
3912 chip->manufacturer.desc = manufacturer;
3913
3914 if (!type)
3915 type = nand_flash_ids;
3916
3917 /*
3918 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
3919 * override it.
3920 * This is required to make sure the initial NAND bus width set by the
3921 * NAND controller driver is coherent with the real NAND bus width
3922 * (extracted by auto-detection code).
3923 */
3924 busw = chip->options & NAND_BUSWIDTH_16;
3925
3926 /*
3927 * The flag is only set (never cleared), so reset it to its default value
3928 * before starting auto-detection.
3929 */
3930 chip->options &= ~NAND_BUSWIDTH_16;
3931
3932 for (; type->name != NULL; type++) {
3933 if (is_full_id_nand(type)) {
3934 if (find_full_id_nand(chip, type))
3935 goto ident_done;
3936 } else if (dev_id == type->dev_id) {
3937 break;
3938 }
3939 }
3940
3941 chip->onfi_version = 0;
3942 if (!type->name || !type->pagesize) {
3943 /* Check if the chip is ONFI compliant */
3944 if (nand_flash_detect_onfi(chip))
3945 goto ident_done;
3946
3947 /* Check if the chip is JEDEC compliant */
3948 if (nand_flash_detect_jedec(chip))
3949 goto ident_done;
3950 }
3951
3952 if (!type->name)
3953 return -ENODEV;
3954
3955 if (!mtd->name)
3956 mtd->name = type->name;
3957
3958 chip->chipsize = (uint64_t)type->chipsize << 20;
3959
3960 if (!type->pagesize)
3961 nand_manufacturer_detect(chip);
3962 else
3963 nand_decode_id(chip, type);
3964
3965 /* Get chip options */
3966 chip->options |= type->options;
3967
3968 ident_done:
3969
3970 if (chip->options & NAND_BUSWIDTH_AUTO) {
3971 WARN_ON(busw & NAND_BUSWIDTH_16);
3972 nand_set_defaults(chip);
3973 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3974 /*
3975 * Check if the bus width is correct. Hardware drivers should
3976 * configure the chip correctly!
3977 */
3978 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
3979 maf_id, dev_id);
3980 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
3981 mtd->name);
3982 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
3983 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
3984 return -EINVAL;
3985 }
3986
3987 nand_decode_bbm_options(chip);
3988
3989 /* Calculate the address shift from the page size */
3990 chip->page_shift = ffs(mtd->writesize) - 1;
3991 /* Convert chipsize to number of pages per chip -1 */
3992 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
3993
3994 chip->bbt_erase_shift = chip->phys_erase_shift =
3995 ffs(mtd->erasesize) - 1;
3996 if (chip->chipsize & 0xffffffff)
3997 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
3998 else {
3999 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4000 chip->chip_shift += 32 - 1;
4001 }
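
/*
 * Example of the shift math above (hypothetical geometry): a 256MiB chip
 * with 2KiB pages and 128KiB blocks gives page_shift = 11,
 * phys_erase_shift = 17, chip_shift = 28 and pagemask = 0x1ffff.
 */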
4002
4003 chip->badblockbits = 8;
4004 chip->erase = single_erase;
4005
4006 /* Do not replace user-supplied command function! */
4007 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4008 chip->cmdfunc = nand_command_lp;
4009
4010 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4011 maf_id, dev_id);
4012
4013 if (chip->onfi_version)
4014 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4015 chip->onfi_params.model);
4016 else if (chip->jedec_version)
4017 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4018 chip->jedec_params.model);
4019 else
4020 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4021 type->name);
4022
4023 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4024 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4025 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4026 return 0;
4027 }
4028
4029 static const char * const nand_ecc_modes[] = {
4030 [NAND_ECC_NONE] = "none",
4031 [NAND_ECC_SOFT] = "soft",
4032 [NAND_ECC_HW] = "hw",
4033 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4034 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4035 [NAND_ECC_ON_DIE] = "on-die",
4036 };
4037
4038 static int of_get_nand_ecc_mode(struct device_node *np)
4039 {
4040 const char *pm;
4041 int err, i;
4042
4043 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4044 if (err < 0)
4045 return err;
4046
4047 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4048 if (!strcasecmp(pm, nand_ecc_modes[i]))
4049 return i;
4050
4051 /*
4052 * For backward compatibility we support a few obsolete values that no
4053 * longer have their own mappings in nand_ecc_modes_t (they were merged
4054 * with other enums).
4055 */
4056 if (!strcasecmp(pm, "soft_bch"))
4057 return NAND_ECC_SOFT;
4058
4059 return -ENODEV;
4060 }
4061
4062 static const char * const nand_ecc_algos[] = {
4063 [NAND_ECC_HAMMING] = "hamming",
4064 [NAND_ECC_BCH] = "bch",
4065 };
4066
4067 static int of_get_nand_ecc_algo(struct device_node *np)
4068 {
4069 const char *pm;
4070 int err, i;
4071
4072 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4073 if (!err) {
4074 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4075 if (!strcasecmp(pm, nand_ecc_algos[i]))
4076 return i;
4077 return -ENODEV;
4078 }
4079
4080 /*
4081 * For backward compatibility we also read "nand-ecc-mode", checking
4082 * for some obsolete values that used to specify the ECC algorithm.
4083 */
4084 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4085 if (err < 0)
4086 return err;
4087
4088 if (!strcasecmp(pm, "soft"))
4089 return NAND_ECC_HAMMING;
4090 else if (!strcasecmp(pm, "soft_bch"))
4091 return NAND_ECC_BCH;
4092
4093 return -ENODEV;
4094 }
4095
4096 static int of_get_nand_ecc_step_size(struct device_node *np)
4097 {
4098 int ret;
4099 u32 val;
4100
4101 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4102 return ret ? ret : val;
4103 }
4104
4105 static int of_get_nand_ecc_strength(struct device_node *np)
4106 {
4107 int ret;
4108 u32 val;
4109
4110 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4111 return ret ? ret : val;
4112 }
4113
4114 static int of_get_nand_bus_width(struct device_node *np)
4115 {
4116 u32 val;
4117
4118 if (of_property_read_u32(np, "nand-bus-width", &val))
4119 return 8;
4120
4121 switch (val) {
4122 case 8:
4123 case 16:
4124 return val;
4125 default:
4126 return -EIO;
4127 }
4128 }
4129
4130 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4131 {
4132 return of_property_read_bool(np, "nand-on-flash-bbt");
4133 }
4134
4135 static int nand_dt_init(struct nand_chip *chip)
4136 {
4137 struct device_node *dn = nand_get_flash_node(chip);
4138 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4139
4140 if (!dn)
4141 return 0;
4142
4143 if (of_get_nand_bus_width(dn) == 16)
4144 chip->options |= NAND_BUSWIDTH_16;
4145
4146 if (of_get_nand_on_flash_bbt(dn))
4147 chip->bbt_options |= NAND_BBT_USE_FLASH;
4148
4149 ecc_mode = of_get_nand_ecc_mode(dn);
4150 ecc_algo = of_get_nand_ecc_algo(dn);
4151 ecc_strength = of_get_nand_ecc_strength(dn);
4152 ecc_step = of_get_nand_ecc_step_size(dn);
4153
4154 if (ecc_mode >= 0)
4155 chip->ecc.mode = ecc_mode;
4156
4157 if (ecc_algo >= 0)
4158 chip->ecc.algo = ecc_algo;
4159
4160 if (ecc_strength >= 0)
4161 chip->ecc.strength = ecc_strength;
4162
4163 if (ecc_step > 0)
4164 chip->ecc.size = ecc_step;
4165
4166 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4167 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4168
4169 return 0;
4170 }
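
/*
 * A minimal sketch of the device tree properties parsed above (the node name
 * and the values are only illustrative):
 *
 *	nand@0 {
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-step-size = <512>;
 *		nand-ecc-strength = <8>;
 *	};
 */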
4171
4172 /**
4173 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4174 * @mtd: MTD device structure
4175 * @maxchips: number of chips to scan for
4176 * @table: alternative NAND ID table
4177 *
4178 * This is the first phase of the normal nand_scan() function. It reads the
4179 * flash ID and sets up MTD fields accordingly.
4180 *
4181 */
4182 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4183 struct nand_flash_dev *table)
4184 {
4185 int i, nand_maf_id, nand_dev_id;
4186 struct nand_chip *chip = mtd_to_nand(mtd);
4187 int ret;
4188
4189 ret = nand_dt_init(chip);
4190 if (ret)
4191 return ret;
4192
4193 if (!mtd->name && mtd->dev.parent)
4194 mtd->name = dev_name(mtd->dev.parent);
4195
4196 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4197 /*
4198 * Default functions assigned for select_chip() and
4199 * cmdfunc() both expect cmd_ctrl() to be populated,
4200 * so we need to check that this is the case.
4201 */
4202 pr_err("chip.cmd_ctrl() callback is not provided");
4203 return -EINVAL;
4204 }
4205 /* Set the default functions */
4206 nand_set_defaults(chip);
4207
4208 /* Read the flash type */
4209 ret = nand_detect(chip, table);
4210 if (ret) {
4211 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4212 pr_warn("No NAND device found\n");
4213 chip->select_chip(mtd, -1);
4214 return ret;
4215 }
4216
4217 nand_maf_id = chip->id.data[0];
4218 nand_dev_id = chip->id.data[1];
4219
4220 chip->select_chip(mtd, -1);
4221
4222 /* Check for a chip array */
4223 for (i = 1; i < maxchips; i++) {
4224 /* See the reset comment in nand_detect() */
4225 nand_reset(chip, i);
4226
4227 chip->select_chip(mtd, i);
4228 /* Send the command for reading device ID */
4229 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4230 /* Read manufacturer and device IDs */
4231 if (nand_maf_id != chip->read_byte(mtd) ||
4232 nand_dev_id != chip->read_byte(mtd)) {
4233 chip->select_chip(mtd, -1);
4234 break;
4235 }
4236 chip->select_chip(mtd, -1);
4237 }
4238 if (i > 1)
4239 pr_info("%d chips detected\n", i);
4240
4241 /* Store the number of chips and calc total size for mtd */
4242 chip->numchips = i;
4243 mtd->size = i * chip->chipsize;
4244
4245 return 0;
4246 }
4247 EXPORT_SYMBOL(nand_scan_ident);
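
/*
 * Typical usage sketch (the ECC values and hook setup below are hypothetical,
 * only the nand_scan_ident()/nand_scan_tail() split is prescribed): a
 * controller driver that must pick its ECC configuration based on the
 * detected chip can do
 *
 *	ret = nand_scan_ident(mtd, 1, NULL);
 *	if (ret)
 *		return ret;
 *
 *	chip->ecc.mode = NAND_ECC_HW;
 *	chip->ecc.size = 512;
 *	chip->ecc.strength = 8;
 *	... set the ecc.hwctl/calculate/correct hooks ...
 *
 *	ret = nand_scan_tail(mtd);
 *	if (ret)
 *		return ret;
 *
 *	return mtd_device_register(mtd, NULL, 0);
 *
 * Drivers that do not need to tweak anything in between can simply call
 * nand_scan(), which performs both phases back to back.
 */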
4248
4249 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4250 {
4251 struct nand_chip *chip = mtd_to_nand(mtd);
4252 struct nand_ecc_ctrl *ecc = &chip->ecc;
4253
4254 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4255 return -EINVAL;
4256
4257 switch (ecc->algo) {
4258 case NAND_ECC_HAMMING:
4259 ecc->calculate = nand_calculate_ecc;
4260 ecc->correct = nand_correct_data;
4261 ecc->read_page = nand_read_page_swecc;
4262 ecc->read_subpage = nand_read_subpage;
4263 ecc->write_page = nand_write_page_swecc;
4264 ecc->read_page_raw = nand_read_page_raw;
4265 ecc->write_page_raw = nand_write_page_raw;
4266 ecc->read_oob = nand_read_oob_std;
4267 ecc->write_oob = nand_write_oob_std;
4268 if (!ecc->size)
4269 ecc->size = 256;
4270 ecc->bytes = 3;
4271 ecc->strength = 1;
4272 return 0;
4273 case NAND_ECC_BCH:
4274 if (!mtd_nand_has_bch()) {
4275 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4276 return -EINVAL;
4277 }
4278 ecc->calculate = nand_bch_calculate_ecc;
4279 ecc->correct = nand_bch_correct_data;
4280 ecc->read_page = nand_read_page_swecc;
4281 ecc->read_subpage = nand_read_subpage;
4282 ecc->write_page = nand_write_page_swecc;
4283 ecc->read_page_raw = nand_read_page_raw;
4284 ecc->write_page_raw = nand_write_page_raw;
4285 ecc->read_oob = nand_read_oob_std;
4286 ecc->write_oob = nand_write_oob_std;
4287
4288 /*
4289 * Board driver should supply ecc.size and ecc.strength
4290 * values to select how many bits are correctable.
4291 * Otherwise, default to 4 bits for large page devices.
4292 */
4293 if (!ecc->size && (mtd->oobsize >= 64)) {
4294 ecc->size = 512;
4295 ecc->strength = 4;
4296 }
4297
4298 /*
4299 * If no ECC placement scheme was provided, pick up the default
4300 * large page one.
4301 */
4302 if (!mtd->ooblayout) {
4303 /* handle large page devices only */
4304 if (mtd->oobsize < 64) {
4305 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4306 return -EINVAL;
4307 }
4308
4309 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4310
4311 }
4312
4313 /*
4314 * We can only maximize ECC config when the default layout is
4315 * used, otherwise we don't know how many bytes can really be
4316 * used.
4317 */
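/*
 * For example (numbers only for illustration), on a 2048-byte page with a
 * 64-byte OOB area this picks 1024-byte steps, so steps = 2,
 * bytes = (64 - 2) / 2 = 31 and
 * strength = 31 * 8 / fls(8 * 1024) = 248 / 14 = 17 bits per step.
 */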
4318 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4319 ecc->options & NAND_ECC_MAXIMIZE) {
4320 int steps, bytes;
4321
4322 /* Always prefer 1k ECC steps over 512-byte ones */
4323 ecc->size = 1024;
4324 steps = mtd->writesize / ecc->size;
4325
4326 /* Reserve 2 bytes for the BBM */
4327 bytes = (mtd->oobsize - 2) / steps;
4328 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4329 }
4330
4331 /* See nand_bch_init() for details. */
4332 ecc->bytes = 0;
4333 ecc->priv = nand_bch_init(mtd);
4334 if (!ecc->priv) {
4335 WARN(1, "BCH ECC initialization failed!\n");
4336 return -EINVAL;
4337 }
4338 return 0;
4339 default:
4340 WARN(1, "Unsupported ECC algorithm!\n");
4341 return -EINVAL;
4342 }
4343 }
4344
4345 /**
4346 * nand_check_ecc_caps - check the sanity of preset ECC settings
4347 * @chip: nand chip info structure
4348 * @caps: ECC caps info structure
4349 * @oobavail: OOB size that the ECC engine can use
4350 *
4351 * When ECC step size and strength are already set, check if they are supported
4352 * by the controller and the calculated ECC bytes fit within the chip's OOB.
4353 * On success, the calculated ECC bytes is set.
4354 */
4355 int nand_check_ecc_caps(struct nand_chip *chip,
4356 const struct nand_ecc_caps *caps, int oobavail)
4357 {
4358 struct mtd_info *mtd = nand_to_mtd(chip);
4359 const struct nand_ecc_step_info *stepinfo;
4360 int preset_step = chip->ecc.size;
4361 int preset_strength = chip->ecc.strength;
4362 int nsteps, ecc_bytes;
4363 int i, j;
4364
4365 if (WARN_ON(oobavail < 0))
4366 return -EINVAL;
4367
4368 if (!preset_step || !preset_strength)
4369 return -ENODATA;
4370
4371 nsteps = mtd->writesize / preset_step;
4372
4373 for (i = 0; i < caps->nstepinfos; i++) {
4374 stepinfo = &caps->stepinfos[i];
4375
4376 if (stepinfo->stepsize != preset_step)
4377 continue;
4378
4379 for (j = 0; j < stepinfo->nstrengths; j++) {
4380 if (stepinfo->strengths[j] != preset_strength)
4381 continue;
4382
4383 ecc_bytes = caps->calc_ecc_bytes(preset_step,
4384 preset_strength);
4385 if (WARN_ON_ONCE(ecc_bytes < 0))
4386 return ecc_bytes;
4387
4388 if (ecc_bytes * nsteps > oobavail) {
4389 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
4390 preset_step, preset_strength);
4391 return -ENOSPC;
4392 }
4393
4394 chip->ecc.bytes = ecc_bytes;
4395
4396 return 0;
4397 }
4398 }
4399
4400 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
4401 preset_step, preset_strength);
4402
4403 return -ENOTSUPP;
4404 }
4405 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
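
/*
 * For instance (purely illustrative numbers), with a preset (step, strength)
 * of (512, 8) on a 2048-byte page there are 4 ECC steps; if the controller's
 * ->calc_ecc_bytes() reports 13 bytes for that combination, the check above
 * succeeds as long as 4 * 13 = 52 bytes fit in the available OOB area.
 */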
4406
4407 /**
4408 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
4409 * @chip: nand chip info structure
4410 * @caps: ECC engine caps info structure
4411 * @oobavail: OOB size that the ECC engine can use
4412 *
4413 * If a chip's ECC requirement is provided, try to meet it with the least
4414 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
4415 * On success, the chosen ECC settings are set.
4416 */
4417 int nand_match_ecc_req(struct nand_chip *chip,
4418 const struct nand_ecc_caps *caps, int oobavail)
4419 {
4420 struct mtd_info *mtd = nand_to_mtd(chip);
4421 const struct nand_ecc_step_info *stepinfo;
4422 int req_step = chip->ecc_step_ds;
4423 int req_strength = chip->ecc_strength_ds;
4424 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
4425 int best_step, best_strength, best_ecc_bytes;
4426 int best_ecc_bytes_total = INT_MAX;
4427 int i, j;
4428
4429 if (WARN_ON(oobavail < 0))
4430 return -EINVAL;
4431
4432 /* No information provided by the NAND chip */
4433 if (!req_step || !req_strength)
4434 return -ENOTSUPP;
4435
4436 /* number of correctable bits the chip requires in a page */
4437 req_corr = mtd->writesize / req_step * req_strength;
4438
4439 for (i = 0; i < caps->nstepinfos; i++) {
4440 stepinfo = &caps->stepinfos[i];
4441 step_size = stepinfo->stepsize;
4442
4443 for (j = 0; j < stepinfo->nstrengths; j++) {
4444 strength = stepinfo->strengths[j];
4445
4446 /*
4447 * If both step size and strength are smaller than the
4448 * chip's requirement, it is not easy to compare the
4449 * resulting reliability.
4450 */
4451 if (step_size < req_step && strength < req_strength)
4452 continue;
4453
4454 if (mtd->writesize % step_size)
4455 continue;
4456
4457 nsteps = mtd->writesize / step_size;
4458
4459 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4460 if (WARN_ON_ONCE(ecc_bytes < 0))
4461 continue;
4462 ecc_bytes_total = ecc_bytes * nsteps;
4463
4464 if (ecc_bytes_total > oobavail ||
4465 strength * nsteps < req_corr)
4466 continue;
4467
4468 /*
4469 * We assume the best is to meet the chip's requirement
4470 * with the least number of ECC bytes.
4471 */
4472 if (ecc_bytes_total < best_ecc_bytes_total) {
4473 best_ecc_bytes_total = ecc_bytes_total;
4474 best_step = step_size;
4475 best_strength = strength;
4476 best_ecc_bytes = ecc_bytes;
4477 }
4478 }
4479 }
4480
4481 if (best_ecc_bytes_total == INT_MAX)
4482 return -ENOTSUPP;
4483
4484 chip->ecc.size = best_step;
4485 chip->ecc.strength = best_strength;
4486 chip->ecc.bytes = best_ecc_bytes;
4487
4488 return 0;
4489 }
4490 EXPORT_SYMBOL_GPL(nand_match_ecc_req);
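
/*
 * Illustration (hypothetical controller capabilities): a chip advertising a
 * requirement of 8 bits per 512 bytes on a 2048-byte page needs
 * req_corr = 4 * 8 = 32 correctable bits per page. A 512-byte/4-bit setting
 * (4 * 4 = 16 bits) is rejected, while a 512-byte/8-bit setting costing,
 * say, 13 ECC bytes per step is selected if 52 bytes fit in the OOB area and
 * no cheaper fitting setting exists.
 */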
4491
4492 /**
4493 * nand_maximize_ecc - choose the max ECC strength available
4494 * @chip: nand chip info structure
4495 * @caps: ECC engine caps info structure
4496 * @oobavail: OOB size that the ECC engine can use
4497 *
4498 * Choose the max ECC strength that is supported on the controller, and can fit
4499 * within the chip's OOB. On success, the chosen ECC settings are set.
4500 */
4501 int nand_maximize_ecc(struct nand_chip *chip,
4502 const struct nand_ecc_caps *caps, int oobavail)
4503 {
4504 struct mtd_info *mtd = nand_to_mtd(chip);
4505 const struct nand_ecc_step_info *stepinfo;
4506 int step_size, strength, nsteps, ecc_bytes, corr;
4507 int best_corr = 0;
4508 int best_step = 0;
4509 int best_strength, best_ecc_bytes;
4510 int i, j;
4511
4512 if (WARN_ON(oobavail < 0))
4513 return -EINVAL;
4514
4515 for (i = 0; i < caps->nstepinfos; i++) {
4516 stepinfo = &caps->stepinfos[i];
4517 step_size = stepinfo->stepsize;
4518
4519 /* If chip->ecc.size is already set, respect it */
4520 if (chip->ecc.size && step_size != chip->ecc.size)
4521 continue;
4522
4523 for (j = 0; j < stepinfo->nstrengths; j++) {
4524 strength = stepinfo->strengths[j];
4525
4526 if (mtd->writesize % step_size)
4527 continue;
4528
4529 nsteps = mtd->writesize / step_size;
4530
4531 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4532 if (WARN_ON_ONCE(ecc_bytes < 0))
4533 continue;
4534
4535 if (ecc_bytes * nsteps > oobavail)
4536 continue;
4537
4538 corr = strength * nsteps;
4539
4540 /*
4541 * If the number of correctable bits is the same,
4542 * bigger step_size has more reliability.
4543 */
4544 if (corr > best_corr ||
4545 (corr == best_corr && step_size > best_step)) {
4546 best_corr = corr;
4547 best_step = step_size;
4548 best_strength = strength;
4549 best_ecc_bytes = ecc_bytes;
4550 }
4551 }
4552 }
4553
4554 if (!best_corr)
4555 return -ENOTSUPP;
4556
4557 chip->ecc.size = best_step;
4558 chip->ecc.strength = best_strength;
4559 chip->ecc.bytes = best_ecc_bytes;
4560
4561 return 0;
4562 }
4563 EXPORT_SYMBOL_GPL(nand_maximize_ecc);
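
/*
 * Illustration (hypothetical capabilities again): on a 2048-byte page with
 * 56 usable OOB bytes, a controller offering strengths 4, 8 and 16 over
 * 512-byte steps at a cost of 7, 13 and 26 ECC bytes per step ends up with
 * the 8-bit setting: 16 bits needs 4 * 26 = 104 bytes and does not fit,
 * while 8 bits (4 * 13 = 52 bytes, 32 correctable bits per page) beats
 * 4 bits (16 correctable bits per page).
 */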
4564
4565 /*
4566 * Check if the chip configuration meets the datasheet requirements.
4567 *
4568 * If our configuration corrects A bits per B bytes and the minimum
4569 * required correction level is X bits per Y bytes, then we must ensure
4570 * both of the following are true:
4571 *
4572 * (1) A / B >= X / Y
4573 * (2) A >= X
4574 *
4575 * Requirement (1) ensures we can correct for the required bitflip density.
4576 * Requirement (2) ensures we can correct even when all bitflips are clumped
4577 * in the same sector.
4578 */
4579 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4580 {
4581 struct nand_chip *chip = mtd_to_nand(mtd);
4582 struct nand_ecc_ctrl *ecc = &chip->ecc;
4583 int corr, ds_corr;
4584
4585 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4586 /* Not enough information */
4587 return true;
4588
4589 /*
4590 * We get the number of corrected bits per page to compare
4591 * the correction density.
4592 */
4593 corr = (mtd->writesize * ecc->strength) / ecc->size;
4594 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4595
4596 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4597 }
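
/*
 * Example: if the chip requires 8 bits per 512 bytes, a configuration
 * correcting 16 bits per 512 bytes passes both checks, while one correcting
 * 4 bits per 256 bytes has the same overall density but fails check (2),
 * since 8 bitflips clumped into a single 256-byte step would exceed its
 * per-step strength.
 */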
4598
4599 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4600 {
4601 struct nand_ecc_ctrl *ecc = &chip->ecc;
4602
4603 if (nand_standard_page_accessors(ecc))
4604 return false;
4605
4606 /*
4607 * The NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, so make sure the NAND
4608 * controller driver implements all the page accessors because
4609 * default helpers are not suitable when the core does not
4610 * send the READ0/PAGEPROG commands.
4611 */
4612 return (!ecc->read_page || !ecc->write_page ||
4613 !ecc->read_page_raw || !ecc->write_page_raw ||
4614 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4615 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4616 ecc->hwctl && ecc->calculate));
4617 }
4618
4619 /**
4620 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4621 * @mtd: MTD device structure
4622 *
4623 * This is the second phase of the normal nand_scan() function. It fills out
4624 * all the uninitialized function pointers with the defaults and scans for a
4625 * bad block table if appropriate.
4626 */
4627 int nand_scan_tail(struct mtd_info *mtd)
4628 {
4629 struct nand_chip *chip = mtd_to_nand(mtd);
4630 struct nand_ecc_ctrl *ecc = &chip->ecc;
4631 struct nand_buffers *nbuf = NULL;
4632 int ret, i;
4633
4634 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4635 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4636 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4637 return -EINVAL;
4638 }
4639
4640 if (invalid_ecc_page_accessors(chip)) {
4641 pr_err("Invalid ECC page accessors setup\n");
4642 return -EINVAL;
4643 }
4644
4645 if (!(chip->options & NAND_OWN_BUFFERS)) {
4646 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4647 if (!nbuf)
4648 return -ENOMEM;
4649
4650 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4651 if (!nbuf->ecccalc) {
4652 ret = -ENOMEM;
4653 goto err_free_nbuf;
4654 }
4655
4656 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4657 if (!nbuf->ecccode) {
4658 ret = -ENOMEM;
4659 goto err_free_nbuf;
4660 }
4661
4662 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4663 GFP_KERNEL);
4664 if (!nbuf->databuf) {
4665 ret = -ENOMEM;
4666 goto err_free_nbuf;
4667 }
4668
4669 chip->buffers = nbuf;
4670 } else if (!chip->buffers) {
4671 return -ENOMEM;
4672 }
4673
4674 /*
4675 * FIXME: some NAND manufacturer drivers expect the first die to be
4676 * selected when manufacturer->init() is called. They should be fixed
4677 * to explicitly select the relevant die when interacting with the NAND
4678 * chip.
4679 */
4680 chip->select_chip(mtd, 0);
4681 ret = nand_manufacturer_init(chip);
4682 chip->select_chip(mtd, -1);
4683 if (ret)
4684 goto err_free_nbuf;
4685
4686 /* Set the internal oob buffer location, just after the page data */
4687 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4688
4689 /*
4690 * If no default placement scheme is given, select an appropriate one.
4691 */
4692 if (!mtd->ooblayout &&
4693 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4694 switch (mtd->oobsize) {
4695 case 8:
4696 case 16:
4697 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4698 break;
4699 case 64:
4700 case 128:
4701 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4702 break;
4703 default:
4704 WARN(1, "No oob scheme defined for oobsize %d\n",
4705 mtd->oobsize);
4706 ret = -EINVAL;
4707 goto err_nand_manuf_cleanup;
4708 }
4709 }
4710
4711 /*
4712 * Check the ECC mode; if the hardware ECC step size (e.g. 512 bytes) is
4713 * larger than the page size (e.g. 256 bytes), fall back to software ECC.
4714 */
4715
4716 switch (ecc->mode) {
4717 case NAND_ECC_HW_OOB_FIRST:
4718 /* Similar to NAND_ECC_HW, but uses a separate read_page handler */
4719 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4720 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4721 ret = -EINVAL;
4722 goto err_nand_manuf_cleanup;
4723 }
4724 if (!ecc->read_page)
4725 ecc->read_page = nand_read_page_hwecc_oob_first;
4726
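/* fall through - NAND_ECC_HW fills in the remaining default helpers */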
4727 case NAND_ECC_HW:
4728 /* Use standard hwecc read page function? */
4729 if (!ecc->read_page)
4730 ecc->read_page = nand_read_page_hwecc;
4731 if (!ecc->write_page)
4732 ecc->write_page = nand_write_page_hwecc;
4733 if (!ecc->read_page_raw)
4734 ecc->read_page_raw = nand_read_page_raw;
4735 if (!ecc->write_page_raw)
4736 ecc->write_page_raw = nand_write_page_raw;
4737 if (!ecc->read_oob)
4738 ecc->read_oob = nand_read_oob_std;
4739 if (!ecc->write_oob)
4740 ecc->write_oob = nand_write_oob_std;
4741 if (!ecc->read_subpage)
4742 ecc->read_subpage = nand_read_subpage;
4743 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4744 ecc->write_subpage = nand_write_subpage_hwecc;
4745
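/* fall through - the checks below also apply to NAND_ECC_HW */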
4746 case NAND_ECC_HW_SYNDROME:
4747 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4748 (!ecc->read_page ||
4749 ecc->read_page == nand_read_page_hwecc ||
4750 !ecc->write_page ||
4751 ecc->write_page == nand_write_page_hwecc)) {
4752 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4753 ret = -EINVAL;
4754 goto err_nand_manuf_cleanup;
4755 }
4756 /* Use standard syndrome read/write page function? */
4757 if (!ecc->read_page)
4758 ecc->read_page = nand_read_page_syndrome;
4759 if (!ecc->write_page)
4760 ecc->write_page = nand_write_page_syndrome;
4761 if (!ecc->read_page_raw)
4762 ecc->read_page_raw = nand_read_page_raw_syndrome;
4763 if (!ecc->write_page_raw)
4764 ecc->write_page_raw = nand_write_page_raw_syndrome;
4765 if (!ecc->read_oob)
4766 ecc->read_oob = nand_read_oob_syndrome;
4767 if (!ecc->write_oob)
4768 ecc->write_oob = nand_write_oob_syndrome;
4769
4770 if (mtd->writesize >= ecc->size) {
4771 if (!ecc->strength) {
4772 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4773 ret = -EINVAL;
4774 goto err_nand_manuf_cleanup;
4775 }
4776 break;
4777 }
4778 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4779 ecc->size, mtd->writesize);
4780 ecc->mode = NAND_ECC_SOFT;
4781 ecc->algo = NAND_ECC_HAMMING;
4782
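/* fall through - the page size was too small for HW ECC, use SW ECC */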
4783 case NAND_ECC_SOFT:
4784 ret = nand_set_ecc_soft_ops(mtd);
4785 if (ret) {
4786 ret = -EINVAL;
4787 goto err_nand_manuf_cleanup;
4788 }
4789 break;
4790
4791 case NAND_ECC_ON_DIE:
4792 if (!ecc->read_page || !ecc->write_page) {
4793 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
4794 ret = -EINVAL;
4795 goto err_nand_manuf_cleanup;
4796 }
4797 if (!ecc->read_oob)
4798 ecc->read_oob = nand_read_oob_std;
4799 if (!ecc->write_oob)
4800 ecc->write_oob = nand_write_oob_std;
4801 break;
4802
4803 case NAND_ECC_NONE:
4804 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4805 ecc->read_page = nand_read_page_raw;
4806 ecc->write_page = nand_write_page_raw;
4807 ecc->read_oob = nand_read_oob_std;
4808 ecc->read_page_raw = nand_read_page_raw;
4809 ecc->write_page_raw = nand_write_page_raw;
4810 ecc->write_oob = nand_write_oob_std;
4811 ecc->size = mtd->writesize;
4812 ecc->bytes = 0;
4813 ecc->strength = 0;
4814 break;
4815
4816 default:
4817 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4818 ret = -EINVAL;
4819 goto err_nand_manuf_cleanup;
4820 }
4821
4822 /* For many systems, the standard OOB write also works for raw */
4823 if (!ecc->read_oob_raw)
4824 ecc->read_oob_raw = ecc->read_oob;
4825 if (!ecc->write_oob_raw)
4826 ecc->write_oob_raw = ecc->write_oob;
4827
4828 /* propagate ecc info to mtd_info */
4829 mtd->ecc_strength = ecc->strength;
4830 mtd->ecc_step_size = ecc->size;
4831
4832 /*
4833 * Set the number of read / write steps for one page depending on ECC
4834 * mode.
4835 */
4836 ecc->steps = mtd->writesize / ecc->size;
4837 if (ecc->steps * ecc->size != mtd->writesize) {
4838 WARN(1, "Invalid ECC parameters\n");
4839 ret = -EINVAL;
4840 goto err_nand_manuf_cleanup;
4841 }
4842 ecc->total = ecc->steps * ecc->bytes;
4843 if (ecc->total > mtd->oobsize) {
4844 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
4845 ret = -EINVAL;
4846 goto err_nand_manuf_cleanup;
4847 }
4848
4849 /*
4850 * The number of bytes available for a client to place data into
4851 * the out-of-band area.
4852 */
4853 ret = mtd_ooblayout_count_freebytes(mtd);
4854 if (ret < 0)
4855 ret = 0;
4856
4857 mtd->oobavail = ret;
4858
4859 /* ECC sanity check: warn if it's too weak */
4860 if (!nand_ecc_strength_good(mtd))
4861 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4862 mtd->name);
4863
4864 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4865 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4866 switch (ecc->steps) {
4867 case 2:
4868 mtd->subpage_sft = 1;
4869 break;
4870 case 4:
4871 case 8:
4872 case 16:
4873 mtd->subpage_sft = 2;
4874 break;
4875 }
4876 }
4877 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4878
4879 /* Initialize state */
4880 chip->state = FL_READY;
4881
4882 /* Invalidate the pagebuffer reference */
4883 chip->pagebuf = -1;
4884
4885 /* Large page NAND with SOFT_ECC should support subpage reads */
4886 switch (ecc->mode) {
4887 case NAND_ECC_SOFT:
4888 if (chip->page_shift > 9)
4889 chip->options |= NAND_SUBPAGE_READ;
4890 break;
4891
4892 default:
4893 break;
4894 }
4895
4896 /* Fill in remaining MTD driver data */
4897 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4898 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4899 MTD_CAP_NANDFLASH;
4900 mtd->_erase = nand_erase;
4901 mtd->_point = NULL;
4902 mtd->_unpoint = NULL;
4903 mtd->_read = nand_read;
4904 mtd->_write = nand_write;
4905 mtd->_panic_write = panic_nand_write;
4906 mtd->_read_oob = nand_read_oob;
4907 mtd->_write_oob = nand_write_oob;
4908 mtd->_sync = nand_sync;
4909 mtd->_lock = NULL;
4910 mtd->_unlock = NULL;
4911 mtd->_suspend = nand_suspend;
4912 mtd->_resume = nand_resume;
4913 mtd->_reboot = nand_shutdown;
4914 mtd->_block_isreserved = nand_block_isreserved;
4915 mtd->_block_isbad = nand_block_isbad;
4916 mtd->_block_markbad = nand_block_markbad;
4917 mtd->_max_bad_blocks = nand_max_bad_blocks;
4918 mtd->writebufsize = mtd->writesize;
4919
4920 /*
4921 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4922 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4923 * properly set.
4924 */
4925 if (!mtd->bitflip_threshold)
4926 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
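/* e.g. an ECC strength of 8 yields a default threshold of 6 bitflips */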
4927
4928 /* Initialize the ->data_interface field. */
4929 ret = nand_init_data_interface(chip);
4930 if (ret)
4931 goto err_nand_manuf_cleanup;
4932
4933 /* Enter fastest possible mode on all dies. */
4934 for (i = 0; i < chip->numchips; i++) {
4935 chip->select_chip(mtd, i);
4936 ret = nand_setup_data_interface(chip, i);
4937 chip->select_chip(mtd, -1);
4938
4939 if (ret)
4940 goto err_nand_data_iface_cleanup;
4941 }
4942
4943 /* Check if we should skip the bad block table scan */
4944 if (chip->options & NAND_SKIP_BBTSCAN)
4945 return 0;
4946
4947 /* Build bad block table */
4948 ret = chip->scan_bbt(mtd);
4949 if (ret)
4950 goto err_nand_data_iface_cleanup;
4951
4952 return 0;
4953
4954 err_nand_data_iface_cleanup:
4955 nand_release_data_interface(chip);
4956
4957 err_nand_manuf_cleanup:
4958 nand_manufacturer_cleanup(chip);
4959
4960 err_free_nbuf:
4961 if (nbuf) {
4962 kfree(nbuf->databuf);
4963 kfree(nbuf->ecccode);
4964 kfree(nbuf->ecccalc);
4965 kfree(nbuf);
4966 }
4967
4968 return ret;
4969 }
4970 EXPORT_SYMBOL(nand_scan_tail);
4971
4972 /*
4973 * is_module_text_address() isn't exported, and it's mostly a pointless
4974 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4975 * to call us from in-kernel code if the core NAND support is modular.
4976 */
4977 #ifdef MODULE
4978 #define caller_is_module() (1)
4979 #else
4980 #define caller_is_module() \
4981 is_module_text_address((unsigned long)__builtin_return_address(0))
4982 #endif
4983
4984 /**
4985 * nand_scan - [NAND Interface] Scan for the NAND device
4986 * @mtd: MTD device structure
4987 * @maxchips: number of chips to scan for
4988 *
4989 * This fills out all the uninitialized function pointers with the defaults.
4990 * The flash ID is read and the mtd/chip structures are filled with the
4991 * appropriate values.
4992 */
4993 int nand_scan(struct mtd_info *mtd, int maxchips)
4994 {
4995 int ret;
4996
4997 ret = nand_scan_ident(mtd, maxchips, NULL);
4998 if (!ret)
4999 ret = nand_scan_tail(mtd);
5000 return ret;
5001 }
5002 EXPORT_SYMBOL(nand_scan);
5003
5004 /**
5005 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5006 * @chip: NAND chip object
5007 */
5008 void nand_cleanup(struct nand_chip *chip)
5009 {
5010 if (chip->ecc.mode == NAND_ECC_SOFT &&
5011 chip->ecc.algo == NAND_ECC_BCH)
5012 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5013
5014 nand_release_data_interface(chip);
5015
5016 /* Free bad block table memory */
5017 kfree(chip->bbt);
5018 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
5019 kfree(chip->buffers->databuf);
5020 kfree(chip->buffers->ecccode);
5021 kfree(chip->buffers->ecccalc);
5022 kfree(chip->buffers);
5023 }
5024
5025 /* Free bad block descriptor memory */
5026 if (chip->badblock_pattern && chip->badblock_pattern->options
5027 & NAND_BBT_DYNAMICSTRUCT)
5028 kfree(chip->badblock_pattern);
5029
5030 /* Free manufacturer priv data. */
5031 nand_manufacturer_cleanup(chip);
5032 }
5033 EXPORT_SYMBOL_GPL(nand_cleanup);
5034
5035 /**
5036 * nand_release - [NAND Interface] Unregister the MTD device and free resources
5037 * held by the NAND device
5038 * @mtd: MTD device structure
5039 */
5040 void nand_release(struct mtd_info *mtd)
5041 {
5042 mtd_device_unregister(mtd);
5043 nand_cleanup(mtd_to_nand(mtd));
5044 }
5045 EXPORT_SYMBOL_GPL(nand_release);
5046
5047 MODULE_LICENSE("GPL");
5048 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5049 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5050 MODULE_DESCRIPTION("Generic NAND flash driver code");