drivers/mtd/nand/nand_base.c
1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * The BBT is not serialized; this has to be fixed
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
47 #include <linux/io.h>
48 #include <linux/mtd/partitions.h>
49 #include <linux/of.h>
50
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
52
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
55
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59 {
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
62
63 if (section > 1)
64 return -ERANGE;
65
66 if (!section) {
67 oobregion->offset = 0;
68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
76 oobregion->offset = 6;
77 oobregion->length = ecc->total - 4;
78 }
79
80 return 0;
81 }
82
83 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
84 struct mtd_oob_region *oobregion)
85 {
86 if (section > 1)
87 return -ERANGE;
88
89 if (mtd->oobsize == 16) {
90 if (section)
91 return -ERANGE;
92
93 oobregion->length = 8;
94 oobregion->offset = 8;
95 } else {
96 oobregion->length = 2;
97 if (!section)
98 oobregion->offset = 3;
99 else
100 oobregion->offset = 6;
101 }
102
103 return 0;
104 }
105
106 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
107 .ecc = nand_ooblayout_ecc_sp,
108 .free = nand_ooblayout_free_sp,
109 };
110 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
111
112 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
113 struct mtd_oob_region *oobregion)
114 {
115 struct nand_chip *chip = mtd_to_nand(mtd);
116 struct nand_ecc_ctrl *ecc = &chip->ecc;
117
118 if (section)
119 return -ERANGE;
120
121 oobregion->length = ecc->total;
122 oobregion->offset = mtd->oobsize - oobregion->length;
123
124 return 0;
125 }
126
127 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
128 struct mtd_oob_region *oobregion)
129 {
130 struct nand_chip *chip = mtd_to_nand(mtd);
131 struct nand_ecc_ctrl *ecc = &chip->ecc;
132
133 if (section)
134 return -ERANGE;
135
136 oobregion->length = mtd->oobsize - ecc->total - 2;
137 oobregion->offset = 2;
138
139 return 0;
140 }
141
142 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
143 .ecc = nand_ooblayout_ecc_lp,
144 .free = nand_ooblayout_free_lp,
145 };
146 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
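
/*
 * Worked example (illustrative values only): on a hypothetical large-page
 * chip with mtd->oobsize = 64 and ecc->total = 40, the layout above puts
 * the ECC bytes at OOB offsets 24..63 and leaves offsets 2..23 free, with
 * bytes 0..1 skipped to protect the bad block marker area.  MTD users do
 * not dereference these ops directly; they walk the regions through the
 * generic helpers from <linux/mtd/mtd.h>:
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_free(mtd, section++, &region))
 *		pr_info("free OOB bytes at [%u..%u]\n", region.offset,
 *			region.offset + region.length - 1);
 */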
147
148 /*
149 * Support the old "large page" layout used for 1-bit Hamming ECC where the ECC bytes
150 * are placed at a fixed offset.
151 */
152 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
153 struct mtd_oob_region *oobregion)
154 {
155 struct nand_chip *chip = mtd_to_nand(mtd);
156 struct nand_ecc_ctrl *ecc = &chip->ecc;
157
158 if (section)
159 return -ERANGE;
160
161 switch (mtd->oobsize) {
162 case 64:
163 oobregion->offset = 40;
164 break;
165 case 128:
166 oobregion->offset = 80;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 oobregion->length = ecc->total;
173 if (oobregion->offset + oobregion->length > mtd->oobsize)
174 return -ERANGE;
175
176 return 0;
177 }
178
179 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
180 struct mtd_oob_region *oobregion)
181 {
182 struct nand_chip *chip = mtd_to_nand(mtd);
183 struct nand_ecc_ctrl *ecc = &chip->ecc;
184 int ecc_offset = 0;
185
186 if (section < 0 || section > 1)
187 return -ERANGE;
188
189 switch (mtd->oobsize) {
190 case 64:
191 ecc_offset = 40;
192 break;
193 case 128:
194 ecc_offset = 80;
195 break;
196 default:
197 return -EINVAL;
198 }
199
200 if (section == 0) {
201 oobregion->offset = 2;
202 oobregion->length = ecc_offset - 2;
203 } else {
204 oobregion->offset = ecc_offset + ecc->total;
205 oobregion->length = mtd->oobsize - oobregion->offset;
206 }
207
208 return 0;
209 }
210
211 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
212 .ecc = nand_ooblayout_ecc_lp_hamming,
213 .free = nand_ooblayout_free_lp_hamming,
214 };
215
216 static int check_offs_len(struct mtd_info *mtd,
217 loff_t ofs, uint64_t len)
218 {
219 struct nand_chip *chip = mtd_to_nand(mtd);
220 int ret = 0;
221
222 /* Start address must align on block boundary */
223 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: unaligned address\n", __func__);
225 ret = -EINVAL;
226 }
227
228 /* Length must align on block boundary */
229 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
230 pr_debug("%s: length not block aligned\n", __func__);
231 ret = -EINVAL;
232 }
233
234 return ret;
235 }
236
237 /**
238 * nand_release_device - [GENERIC] release chip
239 * @mtd: MTD device structure
240 *
241 * Release chip lock and wake up anyone waiting on the device.
242 */
243 static void nand_release_device(struct mtd_info *mtd)
244 {
245 struct nand_chip *chip = mtd_to_nand(mtd);
246
247 /* Release the controller and the chip */
248 spin_lock(&chip->controller->lock);
249 chip->controller->active = NULL;
250 chip->state = FL_READY;
251 wake_up(&chip->controller->wq);
252 spin_unlock(&chip->controller->lock);
253 }
254
255 /**
256 * nand_read_byte - [DEFAULT] read one byte from the chip
257 * @mtd: MTD device structure
258 *
259 * Default read function for 8bit buswidth
260 */
261 static uint8_t nand_read_byte(struct mtd_info *mtd)
262 {
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 return readb(chip->IO_ADDR_R);
265 }
266
267 /**
268 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
269 * @mtd: MTD device structure
270 *
271 * Default read function for 16bit buswidth with endianness conversion.
272 *
273 */
274 static uint8_t nand_read_byte16(struct mtd_info *mtd)
275 {
276 struct nand_chip *chip = mtd_to_nand(mtd);
277 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
278 }
279
280 /**
281 * nand_read_word - [DEFAULT] read one word from the chip
282 * @mtd: MTD device structure
283 *
284 * Default read function for 16bit buswidth without endianness conversion.
285 */
286 static u16 nand_read_word(struct mtd_info *mtd)
287 {
288 struct nand_chip *chip = mtd_to_nand(mtd);
289 return readw(chip->IO_ADDR_R);
290 }
291
292 /**
293 * nand_select_chip - [DEFAULT] control CE line
294 * @mtd: MTD device structure
295 * @chipnr: chipnumber to select, -1 for deselect
296 *
297 * Default select function for 1 chip devices.
298 */
299 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
300 {
301 struct nand_chip *chip = mtd_to_nand(mtd);
302
303 switch (chipnr) {
304 case -1:
305 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
306 break;
307 case 0:
308 break;
309
310 default:
311 BUG();
312 }
313 }
314
315 /**
316 * nand_write_byte - [DEFAULT] write single byte to chip
317 * @mtd: MTD device structure
318 * @byte: value to write
319 *
320 * Default function to write a byte to I/O[7:0]
321 */
322 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
323 {
324 struct nand_chip *chip = mtd_to_nand(mtd);
325
326 chip->write_buf(mtd, &byte, 1);
327 }
328
329 /**
330 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
331 * @mtd: MTD device structure
332 * @byte: value to write
333 *
334 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
335 */
336 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
337 {
338 struct nand_chip *chip = mtd_to_nand(mtd);
339 uint16_t word = byte;
340
341 /*
342 * It's not entirely clear what should happen to I/O[15:8] when writing
343 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
344 *
345 * When the host supports a 16-bit bus width, only data is
346 * transferred at the 16-bit width. All address and command line
347 * transfers shall use only the lower 8-bits of the data bus. During
348 * command transfers, the host may place any value on the upper
349 * 8-bits of the data bus. During address transfers, the host shall
350 * set the upper 8-bits of the data bus to 00h.
351 *
352 * One user of the write_byte callback is nand_onfi_set_features. The
353 * four parameters are specified to be written to I/O[7:0], but this is
354 * neither an address nor a command transfer. Let's assume a 0 on the
355 * upper I/O lines is OK.
356 */
357 chip->write_buf(mtd, (uint8_t *)&word, 2);
358 }
359
360 /**
361 * nand_write_buf - [DEFAULT] write buffer to chip
362 * @mtd: MTD device structure
363 * @buf: data buffer
364 * @len: number of bytes to write
365 *
366 * Default write function for 8bit buswidth.
367 */
368 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
369 {
370 struct nand_chip *chip = mtd_to_nand(mtd);
371
372 iowrite8_rep(chip->IO_ADDR_W, buf, len);
373 }
374
375 /**
376 * nand_read_buf - [DEFAULT] read chip data into buffer
377 * @mtd: MTD device structure
378 * @buf: buffer to store data
379 * @len: number of bytes to read
380 *
381 * Default read function for 8bit buswidth.
382 */
383 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
384 {
385 struct nand_chip *chip = mtd_to_nand(mtd);
386
387 ioread8_rep(chip->IO_ADDR_R, buf, len);
388 }
389
390 /**
391 * nand_write_buf16 - [DEFAULT] write buffer to chip
392 * @mtd: MTD device structure
393 * @buf: data buffer
394 * @len: number of bytes to write
395 *
396 * Default write function for 16bit buswidth.
397 */
398 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
399 {
400 struct nand_chip *chip = mtd_to_nand(mtd);
401 u16 *p = (u16 *) buf;
402
403 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
404 }
405
406 /**
407 * nand_read_buf16 - [DEFAULT] read chip data into buffer
408 * @mtd: MTD device structure
409 * @buf: buffer to store data
410 * @len: number of bytes to read
411 *
412 * Default read function for 16bit buswidth.
413 */
414 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
415 {
416 struct nand_chip *chip = mtd_to_nand(mtd);
417 u16 *p = (u16 *) buf;
418
419 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
420 }
421
422 /**
423 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
424 * @mtd: MTD device structure
425 * @ofs: offset from device start
426 *
427 * Check if the block is bad.
428 */
429 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
430 {
431 int page, page_end, res;
432 struct nand_chip *chip = mtd_to_nand(mtd);
433 u8 bad;
434
435 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
436 ofs += mtd->erasesize - mtd->writesize;
437
438 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
439 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
440
441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res)
444 return res;
445
446 bad = chip->oob_poi[chip->badblockpos];
447
448 if (likely(chip->badblockbits == 8))
449 res = bad != 0xFF;
450 else
451 res = hweight8(bad) < chip->badblockbits;
452 if (res)
453 return res;
454 }
455
456 return 0;
457 }
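
/*
 * Worked example: in the common case chip->badblockbits == 8, so any marker
 * byte other than 0xFF flags the block as bad.  A driver that lowers
 * badblockbits to, say, 7 tolerates a single bitflip in the marker: a read
 * value of 0xFE (seven bits set) is still treated as good, while 0xFC (six
 * bits set) is reported as bad because hweight8(0xFC) < 7.
 */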
458
459 /**
460 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
461 * @mtd: MTD device structure
462 * @ofs: offset from device start
463 *
464 * This is the default implementation, which can be overridden by a hardware
465 * specific driver. It provides the details for writing a bad block marker to a
466 * block.
467 */
468 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
469 {
470 struct nand_chip *chip = mtd_to_nand(mtd);
471 struct mtd_oob_ops ops;
472 uint8_t buf[2] = { 0, 0 };
473 int ret = 0, res, i = 0;
474
475 memset(&ops, 0, sizeof(ops));
476 ops.oobbuf = buf;
477 ops.ooboffs = chip->badblockpos;
478 if (chip->options & NAND_BUSWIDTH_16) {
479 ops.ooboffs &= ~0x01;
480 ops.len = ops.ooblen = 2;
481 } else {
482 ops.len = ops.ooblen = 1;
483 }
484 ops.mode = MTD_OPS_PLACE_OOB;
485
486 /* Write to first/last page(s) if necessary */
487 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
488 ofs += mtd->erasesize - mtd->writesize;
489 do {
490 res = nand_do_write_oob(mtd, ofs, &ops);
491 if (!ret)
492 ret = res;
493
494 i++;
495 ofs += mtd->writesize;
496 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
497
498 return ret;
499 }
500
501 /**
502 * nand_block_markbad_lowlevel - mark a block bad
503 * @mtd: MTD device structure
504 * @ofs: offset from device start
505 *
506 * This function performs the generic NAND bad block marking steps (i.e., bad
507 * block table(s) and/or marker(s)). We only allow the hardware driver to
508 * specify how to write bad block markers to OOB (chip->block_markbad).
509 *
510 * We try operations in the following order:
511 *
512 * (1) erase the affected block, to allow OOB marker to be written cleanly
513 * (2) write bad block marker to OOB area of affected block (unless flag
514 * NAND_BBT_NO_OOB_BBM is present)
515 * (3) update the BBT
516 *
517 * Note that we retain the first error encountered in (2) or (3), finish the
518 * procedures, and dump the error in the end.
519 */
520 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
521 {
522 struct nand_chip *chip = mtd_to_nand(mtd);
523 int res, ret = 0;
524
525 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
526 struct erase_info einfo;
527
528 /* Attempt erase before marking OOB */
529 memset(&einfo, 0, sizeof(einfo));
530 einfo.mtd = mtd;
531 einfo.addr = ofs;
532 einfo.len = 1ULL << chip->phys_erase_shift;
533 nand_erase_nand(mtd, &einfo, 0);
534
535 /* Write bad block marker to OOB */
536 nand_get_device(mtd, FL_WRITING);
537 ret = chip->block_markbad(mtd, ofs);
538 nand_release_device(mtd);
539 }
540
541 /* Mark block bad in BBT */
542 if (chip->bbt) {
543 res = nand_markbad_bbt(mtd, ofs);
544 if (!ret)
545 ret = res;
546 }
547
548 if (!ret)
549 mtd->ecc_stats.badblocks++;
550
551 return ret;
552 }
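
/*
 * Callers normally reach this code through the generic MTD API rather than
 * directly.  A rough sketch of the usual pattern, where write_failed stands
 * for whatever error condition the caller detected:
 *
 *	if (mtd_block_isbad(mtd, ofs))
 *		return -EIO;
 *	...
 *	if (write_failed)
 *		mtd_block_markbad(mtd, ofs);
 *
 * mtd_block_isbad() and mtd_block_markbad() are the public helpers declared
 * in <linux/mtd/mtd.h>; the latter ends up here through the
 * mtd->_block_markbad hook installed by nand_scan_tail().
 */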
553
554 /**
555 * nand_check_wp - [GENERIC] check if the chip is write protected
556 * @mtd: MTD device structure
557 *
558 * Check if the device is write protected. The function expects that the
559 * device is already selected.
560 */
561 static int nand_check_wp(struct mtd_info *mtd)
562 {
563 struct nand_chip *chip = mtd_to_nand(mtd);
564
565 /* Broken xD cards report WP despite being writable */
566 if (chip->options & NAND_BROKEN_XD)
567 return 0;
568
569 /* Check the WP bit */
570 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
571 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
572 }
573
574 /**
575 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
576 * @mtd: MTD device structure
577 * @ofs: offset from device start
578 *
579 * Check if the block is marked as reserved.
580 */
581 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
582 {
583 struct nand_chip *chip = mtd_to_nand(mtd);
584
585 if (!chip->bbt)
586 return 0;
587 /* Return info from the table */
588 return nand_isreserved_bbt(mtd, ofs);
589 }
590
591 /**
592 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
593 * @mtd: MTD device structure
594 * @ofs: offset from device start
595 * @allowbbt: 1 if it is allowed to access the BBT area
596 *
597 * Check if the block is bad, either by reading the bad block table or by
598 * calling the scan function.
599 */
600 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
601 {
602 struct nand_chip *chip = mtd_to_nand(mtd);
603
604 if (!chip->bbt)
605 return chip->block_bad(mtd, ofs);
606
607 /* Return info from the table */
608 return nand_isbad_bbt(mtd, ofs, allowbbt);
609 }
610
611 /**
612 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
613 * @mtd: MTD device structure
614 * @timeo: Timeout
615 *
616 * Helper function for nand_wait_ready used when needing to wait in interrupt
617 * context.
618 */
619 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
620 {
621 struct nand_chip *chip = mtd_to_nand(mtd);
622 int i;
623
624 /* Wait for the device to get ready */
625 for (i = 0; i < timeo; i++) {
626 if (chip->dev_ready(mtd))
627 break;
628 touch_softlockup_watchdog();
629 mdelay(1);
630 }
631 }
632
633 /**
634 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
635 * @mtd: MTD device structure
636 *
637 * Wait for the ready pin after a command, and warn if a timeout occurs.
638 */
639 void nand_wait_ready(struct mtd_info *mtd)
640 {
641 struct nand_chip *chip = mtd_to_nand(mtd);
642 unsigned long timeo = 400;
643
644 if (in_interrupt() || oops_in_progress)
645 return panic_nand_wait_ready(mtd, timeo);
646
647 /* Wait until command is processed or timeout occurs */
648 timeo = jiffies + msecs_to_jiffies(timeo);
649 do {
650 if (chip->dev_ready(mtd))
651 return;
652 cond_resched();
653 } while (time_before(jiffies, timeo));
654
655 if (!chip->dev_ready(mtd))
656 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
657 }
658 EXPORT_SYMBOL_GPL(nand_wait_ready);
659
660 /**
661 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
662 * @mtd: MTD device structure
663 * @timeo: Timeout in ms
664 *
665 * Wait for status ready (i.e. command done) or timeout.
666 */
667 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
668 {
669 register struct nand_chip *chip = mtd_to_nand(mtd);
670
671 timeo = jiffies + msecs_to_jiffies(timeo);
672 do {
673 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
674 break;
675 touch_softlockup_watchdog();
676 } while (time_before(jiffies, timeo));
677 }
678
679 /**
680 * nand_command - [DEFAULT] Send command to NAND device
681 * @mtd: MTD device structure
682 * @command: the command to be sent
683 * @column: the column address for this command, -1 if none
684 * @page_addr: the page address for this command, -1 if none
685 *
686 * Send command to NAND device. This function is used for small page devices
687 * (512 Bytes per page).
688 */
689 static void nand_command(struct mtd_info *mtd, unsigned int command,
690 int column, int page_addr)
691 {
692 register struct nand_chip *chip = mtd_to_nand(mtd);
693 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
694
695 /* Write out the command to the device */
696 if (command == NAND_CMD_SEQIN) {
697 int readcmd;
698
699 if (column >= mtd->writesize) {
700 /* OOB area */
701 column -= mtd->writesize;
702 readcmd = NAND_CMD_READOOB;
703 } else if (column < 256) {
704 /* First 256 bytes --> READ0 */
705 readcmd = NAND_CMD_READ0;
706 } else {
707 column -= 256;
708 readcmd = NAND_CMD_READ1;
709 }
710 chip->cmd_ctrl(mtd, readcmd, ctrl);
711 ctrl &= ~NAND_CTRL_CHANGE;
712 }
713 chip->cmd_ctrl(mtd, command, ctrl);
714
715 /* Address cycle, when necessary */
716 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
717 /* Serially input address */
718 if (column != -1) {
719 /* Adjust columns for 16 bit buswidth */
720 if (chip->options & NAND_BUSWIDTH_16 &&
721 !nand_opcode_8bits(command))
722 column >>= 1;
723 chip->cmd_ctrl(mtd, column, ctrl);
724 ctrl &= ~NAND_CTRL_CHANGE;
725 }
726 if (page_addr != -1) {
727 chip->cmd_ctrl(mtd, page_addr, ctrl);
728 ctrl &= ~NAND_CTRL_CHANGE;
729 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
730 /* One more address cycle for devices > 32MiB */
731 if (chip->chipsize > (32 << 20))
732 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
733 }
734 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
735
736 /*
737 * Program and erase have their own busy handlers; status and sequential
738 * in need no delay.
739 */
740 switch (command) {
741
742 case NAND_CMD_PAGEPROG:
743 case NAND_CMD_ERASE1:
744 case NAND_CMD_ERASE2:
745 case NAND_CMD_SEQIN:
746 case NAND_CMD_STATUS:
747 case NAND_CMD_READID:
748 case NAND_CMD_SET_FEATURES:
749 return;
750
751 case NAND_CMD_RESET:
752 if (chip->dev_ready)
753 break;
754 udelay(chip->chip_delay);
755 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
756 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
757 chip->cmd_ctrl(mtd,
758 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
759 /* EZ-NAND can take up to 250ms as per ONFI v4.0 */
760 nand_wait_status_ready(mtd, 250);
761 return;
762
763 /* This applies to read commands */
764 case NAND_CMD_READ0:
765 /*
766 * READ0 is sometimes used to exit GET STATUS mode. When this
767 * is the case no address cycles are requested, and we can use
768 * this information to detect that we should not wait for the
769 * device to be ready.
770 */
771 if (column == -1 && page_addr == -1)
772 return;
773
774 default:
775 /*
776 * If we don't have access to the busy pin, we apply the given
777 * command delay
778 */
779 if (!chip->dev_ready) {
780 udelay(chip->chip_delay);
781 return;
782 }
783 }
784 /*
785 * Apply this short delay always to ensure that we do wait tWB in
786 * any case on any machine.
787 */
788 ndelay(100);
789
790 nand_wait_ready(mtd);
791 }
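
/*
 * Example of the resulting cycles (illustrative values) for an 8-bit,
 * small-page chip larger than 32MiB when issuing
 * nand_command(mtd, NAND_CMD_READ0, 10, 0x1234):
 *
 *	CLE 0x00	command (NAND_CMD_READ0)
 *	ALE 0x0a	column address
 *	ALE 0x34	page address, bits 7:0
 *	ALE 0x12	page address, bits 15:8
 *	ALE 0x00	page address, bits 23:16 (third row cycle, > 32MiB)
 *
 * On a 16-bit chip the column would first be shifted right by one, as done
 * above for non-8-bit opcodes.
 */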
792
793 static void nand_ccs_delay(struct nand_chip *chip)
794 {
795 /*
796 * The controller already takes care of waiting for tCCS when the RNDIN
797 * or RNDOUT command is sent, return directly.
798 */
799 if (!(chip->options & NAND_WAIT_TCCS))
800 return;
801
802 /*
803 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
804 * (which should be safe for all NANDs).
805 */
806 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
807 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
808 else
809 ndelay(500);
810 }
811
812 /**
813 * nand_command_lp - [DEFAULT] Send command to NAND large page device
814 * @mtd: MTD device structure
815 * @command: the command to be sent
816 * @column: the column address for this command, -1 if none
817 * @page_addr: the page address for this command, -1 if none
818 *
819 * Send command to NAND device. This is the version for the new large page
820 * devices. We don't have the separate regions as we have in the small page
821 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
822 */
823 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
824 int column, int page_addr)
825 {
826 register struct nand_chip *chip = mtd_to_nand(mtd);
827
828 /* Emulate NAND_CMD_READOOB */
829 if (command == NAND_CMD_READOOB) {
830 column += mtd->writesize;
831 command = NAND_CMD_READ0;
832 }
833
834 /* Command latch cycle */
835 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
836
837 if (column != -1 || page_addr != -1) {
838 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
839
840 /* Serially input address */
841 if (column != -1) {
842 /* Adjust columns for 16 bit buswidth */
843 if (chip->options & NAND_BUSWIDTH_16 &&
844 !nand_opcode_8bits(command))
845 column >>= 1;
846 chip->cmd_ctrl(mtd, column, ctrl);
847 ctrl &= ~NAND_CTRL_CHANGE;
848
849 /* Only output a single addr cycle for 8bits opcodes. */
850 if (!nand_opcode_8bits(command))
851 chip->cmd_ctrl(mtd, column >> 8, ctrl);
852 }
853 if (page_addr != -1) {
854 chip->cmd_ctrl(mtd, page_addr, ctrl);
855 chip->cmd_ctrl(mtd, page_addr >> 8,
856 NAND_NCE | NAND_ALE);
857 /* One more address cycle for devices > 128MiB */
858 if (chip->chipsize > (128 << 20))
859 chip->cmd_ctrl(mtd, page_addr >> 16,
860 NAND_NCE | NAND_ALE);
861 }
862 }
863 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
864
865 /*
866 * Program and erase have their own busy handlers; status and sequential
867 * in need no delay.
868 */
869 switch (command) {
870
871 case NAND_CMD_CACHEDPROG:
872 case NAND_CMD_PAGEPROG:
873 case NAND_CMD_ERASE1:
874 case NAND_CMD_ERASE2:
875 case NAND_CMD_SEQIN:
876 case NAND_CMD_STATUS:
877 case NAND_CMD_READID:
878 case NAND_CMD_SET_FEATURES:
879 return;
880
881 case NAND_CMD_RNDIN:
882 nand_ccs_delay(chip);
883 return;
884
885 case NAND_CMD_RESET:
886 if (chip->dev_ready)
887 break;
888 udelay(chip->chip_delay);
889 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
890 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
891 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
892 NAND_NCE | NAND_CTRL_CHANGE);
893 /* EZ-NAND can take up to 250ms as per ONFI v4.0 */
894 nand_wait_status_ready(mtd, 250);
895 return;
896
897 case NAND_CMD_RNDOUT:
898 /* No ready / busy check necessary */
899 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
900 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
901 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
902 NAND_NCE | NAND_CTRL_CHANGE);
903
904 nand_ccs_delay(chip);
905 return;
906
907 case NAND_CMD_READ0:
908 /*
909 * READ0 is sometimes used to exit GET STATUS mode. When this
910 * is the case no address cycles are requested, and we can use
911 * this information to detect that READSTART should not be
912 * issued.
913 */
914 if (column == -1 && page_addr == -1)
915 return;
916
917 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
918 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
919 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
920 NAND_NCE | NAND_CTRL_CHANGE);
921
922 /* This applies to read commands */
923 default:
924 /*
925 * If we don't have access to the busy pin, we apply the given
926 * command delay.
927 */
928 if (!chip->dev_ready) {
929 udelay(chip->chip_delay);
930 return;
931 }
932 }
933
934 /*
935 * Apply this short delay always to ensure that we do wait tWB in
936 * any case on any machine.
937 */
938 ndelay(100);
939
940 nand_wait_ready(mtd);
941 }
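
/*
 * Example of the cycles issued (illustrative values) for an 8-bit,
 * large-page chip bigger than 128MiB when calling
 * nand_command_lp(mtd, NAND_CMD_READ0, 0, 0x10000):
 *
 *	CLE 0x00		command (NAND_CMD_READ0)
 *	ALE 0x00, 0x00		column address, two cycles
 *	ALE 0x00, 0x00, 0x01	page address, three cycles
 *	CLE 0x30		NAND_CMD_READSTART
 *
 * followed by the tWB delay and the ready/busy wait.  A NAND_CMD_READOOB
 * request maps onto the same sequence with the column offset shifted by
 * mtd->writesize, since large-page chips have no separate OOB read command.
 */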
942
943 /**
944 * panic_nand_get_device - [GENERIC] Get chip for selected access
945 * @chip: the nand chip descriptor
946 * @mtd: MTD device structure
947 * @new_state: the state which is requested
948 *
949 * Used when in panic, no locks are taken.
950 */
951 static void panic_nand_get_device(struct nand_chip *chip,
952 struct mtd_info *mtd, int new_state)
953 {
954 /* Hardware controller shared among independent devices */
955 chip->controller->active = chip;
956 chip->state = new_state;
957 }
958
959 /**
960 * nand_get_device - [GENERIC] Get chip for selected access
961 * @mtd: MTD device structure
962 * @new_state: the state which is requested
963 *
964 * Get the device and lock it for exclusive access
965 */
966 static int
967 nand_get_device(struct mtd_info *mtd, int new_state)
968 {
969 struct nand_chip *chip = mtd_to_nand(mtd);
970 spinlock_t *lock = &chip->controller->lock;
971 wait_queue_head_t *wq = &chip->controller->wq;
972 DECLARE_WAITQUEUE(wait, current);
973 retry:
974 spin_lock(lock);
975
976 /* Hardware controller shared among independent devices */
977 if (!chip->controller->active)
978 chip->controller->active = chip;
979
980 if (chip->controller->active == chip && chip->state == FL_READY) {
981 chip->state = new_state;
982 spin_unlock(lock);
983 return 0;
984 }
985 if (new_state == FL_PM_SUSPENDED) {
986 if (chip->controller->active->state == FL_PM_SUSPENDED) {
987 chip->state = FL_PM_SUSPENDED;
988 spin_unlock(lock);
989 return 0;
990 }
991 }
992 set_current_state(TASK_UNINTERRUPTIBLE);
993 add_wait_queue(wq, &wait);
994 spin_unlock(lock);
995 schedule();
996 remove_wait_queue(wq, &wait);
997 goto retry;
998 }
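
/*
 * The MTD entry points implemented later in this file bracket their work
 * with this pair, e.g. (simplified from nand_block_markbad_lowlevel()
 * above):
 *
 *	nand_get_device(mtd, FL_WRITING);
 *	ret = chip->block_markbad(mtd, ofs);
 *	nand_release_device(mtd);
 *
 * so that chips sharing one controller serialize their accesses on
 * controller->lock and controller->wq.
 */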
999
1000 /**
1001 * panic_nand_wait - [GENERIC] wait until the command is done
1002 * @mtd: MTD device structure
1003 * @chip: NAND chip structure
1004 * @timeo: timeout
1005 *
1006 * Wait for command done. This is a helper function for nand_wait used when
1007 * we are in interrupt context. May happen when in panic and trying to write
1008 * an oops through mtdoops.
1009 */
1010 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1011 unsigned long timeo)
1012 {
1013 int i;
1014 for (i = 0; i < timeo; i++) {
1015 if (chip->dev_ready) {
1016 if (chip->dev_ready(mtd))
1017 break;
1018 } else {
1019 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1020 break;
1021 }
1022 mdelay(1);
1023 }
1024 }
1025
1026 /**
1027 * nand_wait - [DEFAULT] wait until the command is done
1028 * @mtd: MTD device structure
1029 * @chip: NAND chip structure
1030 *
1031 * Wait for command done. This applies to erase and program only.
1032 */
1033 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1034 {
1035
1036 int status;
1037 unsigned long timeo = 400;
1038
1039 /*
1040 * Apply this short delay always to ensure that we do wait tWB in any
1041 * case on any machine.
1042 */
1043 ndelay(100);
1044
1045 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1046
1047 if (in_interrupt() || oops_in_progress)
1048 panic_nand_wait(mtd, chip, timeo);
1049 else {
1050 timeo = jiffies + msecs_to_jiffies(timeo);
1051 do {
1052 if (chip->dev_ready) {
1053 if (chip->dev_ready(mtd))
1054 break;
1055 } else {
1056 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1057 break;
1058 }
1059 cond_resched();
1060 } while (time_before(jiffies, timeo));
1061 }
1062
1063 status = (int)chip->read_byte(mtd);
1064 /* This can happen in case of a timeout or a buggy dev_ready */
1065 WARN_ON(!(status & NAND_STATUS_READY));
1066 return status;
1067 }
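
/*
 * The write and erase paths later in this file decode the status byte
 * returned by ->waitfunc() roughly like this:
 *
 *	status = chip->waitfunc(mtd, chip);
 *	if (status & NAND_STATUS_FAIL)
 *		return -EIO;
 *
 * NAND_STATUS_READY only signals that the chip has left the busy state;
 * the NAND_STATUS_FAIL bit is what reports whether the program or erase
 * operation actually succeeded.
 */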
1068
1069 /**
1070 * nand_reset_data_interface - Reset data interface and timings
1071 * @chip: The NAND chip
1072 * @chipnr: Internal die id
1073 *
1074 * Reset the Data interface and timings to ONFI mode 0.
1075 *
1076 * Returns 0 for success or negative error code otherwise.
1077 */
1078 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1079 {
1080 struct mtd_info *mtd = nand_to_mtd(chip);
1081 const struct nand_data_interface *conf;
1082 int ret;
1083
1084 if (!chip->setup_data_interface)
1085 return 0;
1086
1087 /*
1088 * The ONFI specification says:
1089 * "
1090 * To transition from NV-DDR or NV-DDR2 to the SDR data
1091 * interface, the host shall use the Reset (FFh) command
1092 * using SDR timing mode 0. A device in any timing mode is
1093 * required to recognize Reset (FFh) command issued in SDR
1094 * timing mode 0.
1095 * "
1096 *
1097 * Configure the data interface in SDR mode and set the
1098 * timings to timing mode 0.
1099 */
1100
1101 conf = nand_get_default_data_interface();
1102 ret = chip->setup_data_interface(mtd, chipnr, conf);
1103 if (ret)
1104 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1105
1106 return ret;
1107 }
1108
1109 /**
1110 * nand_setup_data_interface - Setup the best data interface and timings
1111 * @chip: The NAND chip
1112 * @chipnr: Internal die id
1113 *
1114 * Find and configure the best data interface and NAND timings supported by
1115 * the chip and the driver.
1116 * First tries to retrieve supported timing modes from ONFI information,
1117 * and if the NAND chip does not support ONFI, relies on the
1118 * ->onfi_timing_mode_default specified in the nand_ids table.
1119 *
1120 * Returns 0 for success or negative error code otherwise.
1121 */
1122 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1123 {
1124 struct mtd_info *mtd = nand_to_mtd(chip);
1125 int ret;
1126
1127 if (!chip->setup_data_interface || !chip->data_interface)
1128 return 0;
1129
1130 /*
1131 * Ensure the timing mode has been changed on the chip side
1132 * before changing timings on the controller side.
1133 */
1134 if (chip->onfi_version &&
1135 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1136 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1137 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1138 chip->onfi_timing_mode_default,
1139 };
1140
1141 ret = chip->onfi_set_features(mtd, chip,
1142 ONFI_FEATURE_ADDR_TIMING_MODE,
1143 tmode_param);
1144 if (ret)
1145 goto err;
1146 }
1147
1148 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1149 err:
1150 return ret;
1151 }
1152
1153 /**
1154 * nand_init_data_interface - find the best data interface and timings
1155 * @chip: The NAND chip
1156 *
1157 * Find the best data interface and NAND timings supported by the chip
1158 * and the driver.
1159 * First tries to retrieve supported timing modes from ONFI information,
1160 * and if the NAND chip does not support ONFI, relies on the
1161 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1162 * function nand_chip->data_interface is initialized with the best timing mode
1163 * available.
1164 *
1165 * Returns 0 for success or negative error code otherwise.
1166 */
1167 static int nand_init_data_interface(struct nand_chip *chip)
1168 {
1169 struct mtd_info *mtd = nand_to_mtd(chip);
1170 int modes, mode, ret;
1171
1172 if (!chip->setup_data_interface)
1173 return 0;
1174
1175 /*
1176 * First try to identify the best timings from ONFI parameters and
1177 * if the NAND does not support ONFI, fallback to the default ONFI
1178 * timing mode.
1179 */
1180 modes = onfi_get_async_timing_mode(chip);
1181 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1182 if (!chip->onfi_timing_mode_default)
1183 return 0;
1184
1185 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1186 }
1187
1188 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1189 GFP_KERNEL);
1190 if (!chip->data_interface)
1191 return -ENOMEM;
1192
1193 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1194 ret = onfi_init_data_interface(chip, chip->data_interface,
1195 NAND_SDR_IFACE, mode);
1196 if (ret)
1197 continue;
1198
1199 /* Only check if the controller supports the requested timings */
1200 ret = chip->setup_data_interface(mtd,
1201 NAND_DATA_IFACE_CHECK_ONLY,
1202 chip->data_interface);
1203 if (!ret) {
1204 chip->onfi_timing_mode_default = mode;
1205 break;
1206 }
1207 }
1208
1209 return 0;
1210 }
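
/*
 * A controller driver plugs into this through chip->setup_data_interface().
 * A minimal sketch (the foo_* names are placeholders, not a real driver),
 * handling the NAND_DATA_IFACE_CHECK_ONLY probe described above:
 *
 *	static int foo_setup_data_interface(struct mtd_info *mtd, int chipnr,
 *					    const struct nand_data_interface *conf)
 *	{
 *		const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
 *
 *		if (IS_ERR(sdr))
 *			return PTR_ERR(sdr);
 *
 *		if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
 *			return foo_check_timings(sdr);
 *
 *		return foo_apply_timings(sdr);
 *	}
 */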
1211
1212 static void nand_release_data_interface(struct nand_chip *chip)
1213 {
1214 kfree(chip->data_interface);
1215 }
1216
1217 /**
1218 * nand_reset - Reset and initialize a NAND device
1219 * @chip: The NAND chip
1220 * @chipnr: Internal die id
1221 *
1222 * Returns 0 for success or negative error code otherwise
1223 */
1224 int nand_reset(struct nand_chip *chip, int chipnr)
1225 {
1226 struct mtd_info *mtd = nand_to_mtd(chip);
1227 int ret;
1228
1229 ret = nand_reset_data_interface(chip, chipnr);
1230 if (ret)
1231 return ret;
1232
1233 /*
1234 * The CS line has to be released before we can apply the new NAND
1235 * interface settings, hence this weird ->select_chip() dance.
1236 */
1237 chip->select_chip(mtd, chipnr);
1238 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1239 chip->select_chip(mtd, -1);
1240
1241 chip->select_chip(mtd, chipnr);
1242 ret = nand_setup_data_interface(chip, chipnr);
1243 chip->select_chip(mtd, -1);
1244 if (ret)
1245 return ret;
1246
1247 return 0;
1248 }
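
/*
 * A controller driver that needs to re-initialize its chip (after resume,
 * for instance) is expected to reset every die, along these lines:
 *
 *	int i, ret;
 *
 *	for (i = 0; i < chip->numchips; i++) {
 *		ret = nand_reset(chip, i);
 *		if (ret)
 *			return ret;
 *	}
 *
 * This is only a sketch; during probe, nand_scan_ident() already performs
 * the equivalent reset for each detected die.
 */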
1249
1250 /**
1251 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1252 * @buf: buffer to test
1253 * @len: buffer length
1254 * @bitflips_threshold: maximum number of bitflips
1255 *
1256 * Check if a buffer contains only 0xff, which means the underlying region
1257 * has been erased and is ready to be programmed.
1258 * The bitflips_threshold specifies the maximum number of bitflips before
1259 * considering the region as not erased.
1260 * Note: The logic of this function has been extracted from the memweight
1261 * implementation, except that nand_check_erased_buf() exits before
1262 * testing the whole buffer if the number of bitflips exceeds the
1263 * bitflips_threshold value.
1264 *
1265 * Returns a positive number of bitflips less than or equal to
1266 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1267 * threshold.
1268 */
1269 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1270 {
1271 const unsigned char *bitmap = buf;
1272 int bitflips = 0;
1273 int weight;
1274
1275 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1276 len--, bitmap++) {
1277 weight = hweight8(*bitmap);
1278 bitflips += BITS_PER_BYTE - weight;
1279 if (unlikely(bitflips > bitflips_threshold))
1280 return -EBADMSG;
1281 }
1282
1283 for (; len >= sizeof(long);
1284 len -= sizeof(long), bitmap += sizeof(long)) {
1285 unsigned long d = *((unsigned long *)bitmap);
1286 if (d == ~0UL)
1287 continue;
1288 weight = hweight_long(d);
1289 bitflips += BITS_PER_LONG - weight;
1290 if (unlikely(bitflips > bitflips_threshold))
1291 return -EBADMSG;
1292 }
1293
1294 for (; len > 0; len--, bitmap++) {
1295 weight = hweight8(*bitmap);
1296 bitflips += BITS_PER_BYTE - weight;
1297 if (unlikely(bitflips > bitflips_threshold))
1298 return -EBADMSG;
1299 }
1300
1301 return bitflips;
1302 }
1303
1304 /**
1305 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1306 * 0xff data
1307 * @data: data buffer to test
1308 * @datalen: data length
1309 * @ecc: ECC buffer
1310 * @ecclen: ECC length
1311 * @extraoob: extra OOB buffer
1312 * @extraooblen: extra OOB length
1313 * @bitflips_threshold: maximum number of bitflips
1314 *
1315 * Check if a data buffer and its associated ECC and OOB data contains only
1316 * 0xff pattern, which means the underlying region has been erased and is
1317 * ready to be programmed.
1318 * The bitflips_threshold specifies the maximum number of bitflips before
1319 * considering the region as not erased.
1320 *
1321 * Note:
1322 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
1323 * different from the NAND page size. When fixing bitflips, ECC engines will
1324 * report the number of errors per chunk, and the NAND core infrastructure
1325 * expects you to return the maximum number of bitflips for the whole page.
1326 * This is why you should always use this function on a single chunk and
1327 * not on the whole page. After checking each chunk you should update your
1328 * max_bitflips value accordingly.
1329 * 2/ When checking for bitflips in erased pages you should not only check
1330 * the payload data but also their associated ECC data, because a user might
1331 * have programmed almost all bits to 1 except for a few. In this case, we
1332 * shouldn't consider the chunk as erased, and checking the ECC bytes
1333 * prevents this.
1334 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1335 * data are protected by the ECC engine.
1336 * It could also be used if you support subpages and want to attach some
1337 * extra OOB data to an ECC chunk.
1338 *
1339 * Returns a positive number of bitflips less than or equal to
1340 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1341 * threshold. In case of success, the passed buffers are filled with 0xff.
1342 */
1343 int nand_check_erased_ecc_chunk(void *data, int datalen,
1344 void *ecc, int ecclen,
1345 void *extraoob, int extraooblen,
1346 int bitflips_threshold)
1347 {
1348 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1349
1350 data_bitflips = nand_check_erased_buf(data, datalen,
1351 bitflips_threshold);
1352 if (data_bitflips < 0)
1353 return data_bitflips;
1354
1355 bitflips_threshold -= data_bitflips;
1356
1357 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1358 if (ecc_bitflips < 0)
1359 return ecc_bitflips;
1360
1361 bitflips_threshold -= ecc_bitflips;
1362
1363 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1364 bitflips_threshold);
1365 if (extraoob_bitflips < 0)
1366 return extraoob_bitflips;
1367
1368 if (data_bitflips)
1369 memset(data, 0xff, datalen);
1370
1371 if (ecc_bitflips)
1372 memset(ecc, 0xff, ecclen);
1373
1374 if (extraoob_bitflips)
1375 memset(extraoob, 0xff, extraooblen);
1376
1377 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1378 }
1379 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
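
/*
 * Typical use, mirroring nand_read_page_hwecc() below: the check is applied
 * chunk by chunk, only when the ECC engine reported an uncorrectable error,
 * and its result feeds the per-page max_bitflips accounting:
 *
 *	stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
 *	if (stat == -EBADMSG)
 *		stat = nand_check_erased_ecc_chunk(p, eccsize,
 *						   &ecc_code[i], eccbytes,
 *						   NULL, 0,
 *						   chip->ecc.strength);
 *	if (stat < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 *
 * The in-tree readers additionally gate the erased-page check behind the
 * NAND_ECC_GENERIC_ERASED_CHECK option.
 */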
1380
1381 /**
1382 * nand_read_page_raw - [INTERN] read raw page data without ecc
1383 * @mtd: mtd info structure
1384 * @chip: nand chip info structure
1385 * @buf: buffer to store read data
1386 * @oob_required: caller requires OOB data read to chip->oob_poi
1387 * @page: page number to read
1388 *
1389 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1390 */
1391 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1392 uint8_t *buf, int oob_required, int page)
1393 {
1394 chip->read_buf(mtd, buf, mtd->writesize);
1395 if (oob_required)
1396 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1397 return 0;
1398 }
1399 EXPORT_SYMBOL(nand_read_page_raw);
1400
1401 /**
1402 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1403 * @mtd: mtd info structure
1404 * @chip: nand chip info structure
1405 * @buf: buffer to store read data
1406 * @oob_required: caller requires OOB data read to chip->oob_poi
1407 * @page: page number to read
1408 *
1409 * We need a special oob layout and handling even when OOB isn't used.
1410 */
1411 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1412 struct nand_chip *chip, uint8_t *buf,
1413 int oob_required, int page)
1414 {
1415 int eccsize = chip->ecc.size;
1416 int eccbytes = chip->ecc.bytes;
1417 uint8_t *oob = chip->oob_poi;
1418 int steps, size;
1419
1420 for (steps = chip->ecc.steps; steps > 0; steps--) {
1421 chip->read_buf(mtd, buf, eccsize);
1422 buf += eccsize;
1423
1424 if (chip->ecc.prepad) {
1425 chip->read_buf(mtd, oob, chip->ecc.prepad);
1426 oob += chip->ecc.prepad;
1427 }
1428
1429 chip->read_buf(mtd, oob, eccbytes);
1430 oob += eccbytes;
1431
1432 if (chip->ecc.postpad) {
1433 chip->read_buf(mtd, oob, chip->ecc.postpad);
1434 oob += chip->ecc.postpad;
1435 }
1436 }
1437
1438 size = mtd->oobsize - (oob - chip->oob_poi);
1439 if (size)
1440 chip->read_buf(mtd, oob, size);
1441
1442 return 0;
1443 }
1444
1445 /**
1446 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1447 * @mtd: mtd info structure
1448 * @chip: nand chip info structure
1449 * @buf: buffer to store read data
1450 * @oob_required: caller requires OOB data read to chip->oob_poi
1451 * @page: page number to read
1452 */
1453 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1454 uint8_t *buf, int oob_required, int page)
1455 {
1456 int i, eccsize = chip->ecc.size, ret;
1457 int eccbytes = chip->ecc.bytes;
1458 int eccsteps = chip->ecc.steps;
1459 uint8_t *p = buf;
1460 uint8_t *ecc_calc = chip->buffers->ecccalc;
1461 uint8_t *ecc_code = chip->buffers->ecccode;
1462 unsigned int max_bitflips = 0;
1463
1464 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1465
1466 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1467 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1468
1469 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1470 chip->ecc.total);
1471 if (ret)
1472 return ret;
1473
1474 eccsteps = chip->ecc.steps;
1475 p = buf;
1476
1477 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1478 int stat;
1479
1480 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1481 if (stat < 0) {
1482 mtd->ecc_stats.failed++;
1483 } else {
1484 mtd->ecc_stats.corrected += stat;
1485 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1486 }
1487 }
1488 return max_bitflips;
1489 }
1490
1491 /**
1492 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1493 * @mtd: mtd info structure
1494 * @chip: nand chip info structure
1495 * @data_offs: offset of requested data within the page
1496 * @readlen: data length
1497 * @bufpoi: buffer to store read data
1498 * @page: page number to read
1499 */
1500 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1501 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1502 int page)
1503 {
1504 int start_step, end_step, num_steps, ret;
1505 uint8_t *p;
1506 int data_col_addr, i, gaps = 0;
1507 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1508 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1509 int index, section = 0;
1510 unsigned int max_bitflips = 0;
1511 struct mtd_oob_region oobregion = { };
1512
1513 /* Column address within the page, aligned to the ECC size (256 bytes) */
1514 start_step = data_offs / chip->ecc.size;
1515 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1516 num_steps = end_step - start_step + 1;
1517 index = start_step * chip->ecc.bytes;
1518
1519 /* Data size aligned to ECC ecc.size */
1520 datafrag_len = num_steps * chip->ecc.size;
1521 eccfrag_len = num_steps * chip->ecc.bytes;
1522
1523 data_col_addr = start_step * chip->ecc.size;
1524 /* If we are not reading page-aligned data */
1525 if (data_col_addr != 0)
1526 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1527
1528 p = bufpoi + data_col_addr;
1529 chip->read_buf(mtd, p, datafrag_len);
1530
1531 /* Calculate ECC */
1532 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1533 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1534
1535 /*
1536 * Performance is better if we position offsets according to
1537 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1538 */
1539 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1540 if (ret)
1541 return ret;
1542
1543 if (oobregion.length < eccfrag_len)
1544 gaps = 1;
1545
1546 if (gaps) {
1547 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1549 } else {
1550 /*
1551 * Send the command to read the particular ECC bytes; take care
1552 * of buswidth alignment in read_buf.
1553 */
1554 aligned_pos = oobregion.offset & ~(busw - 1);
1555 aligned_len = eccfrag_len;
1556 if (oobregion.offset & (busw - 1))
1557 aligned_len++;
1558 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1559 (busw - 1))
1560 aligned_len++;
1561
1562 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1563 mtd->writesize + aligned_pos, -1);
1564 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1565 }
1566
1567 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1568 chip->oob_poi, index, eccfrag_len);
1569 if (ret)
1570 return ret;
1571
1572 p = bufpoi + data_col_addr;
1573 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1574 int stat;
1575
1576 stat = chip->ecc.correct(mtd, p,
1577 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1578 if (stat == -EBADMSG &&
1579 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1580 /* check for empty pages with bitflips */
1581 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1582 &chip->buffers->ecccode[i],
1583 chip->ecc.bytes,
1584 NULL, 0,
1585 chip->ecc.strength);
1586 }
1587
1588 if (stat < 0) {
1589 mtd->ecc_stats.failed++;
1590 } else {
1591 mtd->ecc_stats.corrected += stat;
1592 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1593 }
1594 }
1595 return max_bitflips;
1596 }
1597
1598 /**
1599 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1600 * @mtd: mtd info structure
1601 * @chip: nand chip info structure
1602 * @buf: buffer to store read data
1603 * @oob_required: caller requires OOB data read to chip->oob_poi
1604 * @page: page number to read
1605 *
1606 * Not for syndrome calculating ECC controllers which need a special oob layout.
1607 */
1608 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1609 uint8_t *buf, int oob_required, int page)
1610 {
1611 int i, eccsize = chip->ecc.size, ret;
1612 int eccbytes = chip->ecc.bytes;
1613 int eccsteps = chip->ecc.steps;
1614 uint8_t *p = buf;
1615 uint8_t *ecc_calc = chip->buffers->ecccalc;
1616 uint8_t *ecc_code = chip->buffers->ecccode;
1617 unsigned int max_bitflips = 0;
1618
1619 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1620 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1621 chip->read_buf(mtd, p, eccsize);
1622 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1623 }
1624 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1625
1626 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1627 chip->ecc.total);
1628 if (ret)
1629 return ret;
1630
1631 eccsteps = chip->ecc.steps;
1632 p = buf;
1633
1634 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1635 int stat;
1636
1637 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1638 if (stat == -EBADMSG &&
1639 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1640 /* check for empty pages with bitflips */
1641 stat = nand_check_erased_ecc_chunk(p, eccsize,
1642 &ecc_code[i], eccbytes,
1643 NULL, 0,
1644 chip->ecc.strength);
1645 }
1646
1647 if (stat < 0) {
1648 mtd->ecc_stats.failed++;
1649 } else {
1650 mtd->ecc_stats.corrected += stat;
1651 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1652 }
1653 }
1654 return max_bitflips;
1655 }
1656
1657 /**
1658 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1659 * @mtd: mtd info structure
1660 * @chip: nand chip info structure
1661 * @buf: buffer to store read data
1662 * @oob_required: caller requires OOB data read to chip->oob_poi
1663 * @page: page number to read
1664 *
1665 * Hardware ECC for large page chips; this mode requires the OOB to be read
1666 * first. The write_page method is re-used from ECC_HW. These methods
1667 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
1668 * multiple ECC steps, which follows the "infix ECC" scheme and reads/writes
1669 * ECC from the data area, overwriting the NAND manufacturer bad block markings.
1670 */
1671 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1672 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1673 {
1674 int i, eccsize = chip->ecc.size, ret;
1675 int eccbytes = chip->ecc.bytes;
1676 int eccsteps = chip->ecc.steps;
1677 uint8_t *p = buf;
1678 uint8_t *ecc_code = chip->buffers->ecccode;
1679 uint8_t *ecc_calc = chip->buffers->ecccalc;
1680 unsigned int max_bitflips = 0;
1681
1682 /* Read the OOB area first */
1683 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1684 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1685 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1686
1687 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1688 chip->ecc.total);
1689 if (ret)
1690 return ret;
1691
1692 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1693 int stat;
1694
1695 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1696 chip->read_buf(mtd, p, eccsize);
1697 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1698
1699 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1700 if (stat == -EBADMSG &&
1701 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1702 /* check for empty pages with bitflips */
1703 stat = nand_check_erased_ecc_chunk(p, eccsize,
1704 &ecc_code[i], eccbytes,
1705 NULL, 0,
1706 chip->ecc.strength);
1707 }
1708
1709 if (stat < 0) {
1710 mtd->ecc_stats.failed++;
1711 } else {
1712 mtd->ecc_stats.corrected += stat;
1713 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1714 }
1715 }
1716 return max_bitflips;
1717 }
1718
1719 /**
1720 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1721 * @mtd: mtd info structure
1722 * @chip: nand chip info structure
1723 * @buf: buffer to store read data
1724 * @oob_required: caller requires OOB data read to chip->oob_poi
1725 * @page: page number to read
1726 *
1727 * The hw generator calculates the error syndrome automatically. Therefore we
1728 * need a special oob layout and handling.
1729 */
1730 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1731 uint8_t *buf, int oob_required, int page)
1732 {
1733 int i, eccsize = chip->ecc.size;
1734 int eccbytes = chip->ecc.bytes;
1735 int eccsteps = chip->ecc.steps;
1736 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1737 uint8_t *p = buf;
1738 uint8_t *oob = chip->oob_poi;
1739 unsigned int max_bitflips = 0;
1740
1741 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1742 int stat;
1743
1744 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1745 chip->read_buf(mtd, p, eccsize);
1746
1747 if (chip->ecc.prepad) {
1748 chip->read_buf(mtd, oob, chip->ecc.prepad);
1749 oob += chip->ecc.prepad;
1750 }
1751
1752 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1753 chip->read_buf(mtd, oob, eccbytes);
1754 stat = chip->ecc.correct(mtd, p, oob, NULL);
1755
1756 oob += eccbytes;
1757
1758 if (chip->ecc.postpad) {
1759 chip->read_buf(mtd, oob, chip->ecc.postpad);
1760 oob += chip->ecc.postpad;
1761 }
1762
1763 if (stat == -EBADMSG &&
1764 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1765 /* check for empty pages with bitflips */
1766 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1767 oob - eccpadbytes,
1768 eccpadbytes,
1769 NULL, 0,
1770 chip->ecc.strength);
1771 }
1772
1773 if (stat < 0) {
1774 mtd->ecc_stats.failed++;
1775 } else {
1776 mtd->ecc_stats.corrected += stat;
1777 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1778 }
1779 }
1780
1781 /* Calculate remaining oob bytes */
1782 i = mtd->oobsize - (oob - chip->oob_poi);
1783 if (i)
1784 chip->read_buf(mtd, oob, i);
1785
1786 return max_bitflips;
1787 }
1788
1789 /**
1790 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1791 * @mtd: mtd info structure
1792 * @oob: oob destination address
1793 * @ops: oob ops structure
1794 * @len: size of oob to transfer
1795 */
1796 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1797 struct mtd_oob_ops *ops, size_t len)
1798 {
1799 struct nand_chip *chip = mtd_to_nand(mtd);
1800 int ret;
1801
1802 switch (ops->mode) {
1803
1804 case MTD_OPS_PLACE_OOB:
1805 case MTD_OPS_RAW:
1806 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1807 return oob + len;
1808
1809 case MTD_OPS_AUTO_OOB:
1810 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1811 ops->ooboffs, len);
1812 BUG_ON(ret);
1813 return oob + len;
1814
1815 default:
1816 BUG();
1817 }
1818 return NULL;
1819 }
1820
1821 /**
1822 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1823 * @mtd: MTD device structure
1824 * @retry_mode: the retry mode to use
1825 *
1826 * Some vendors supply a special command to shift the Vt threshold, to be used
1827 * when there are too many bitflips in a page (i.e., ECC error). After setting
1828 * a new threshold, the host should retry reading the page.
1829 */
1830 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1831 {
1832 struct nand_chip *chip = mtd_to_nand(mtd);
1833
1834 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1835
1836 if (retry_mode >= chip->read_retries)
1837 return -EINVAL;
1838
1839 if (!chip->setup_read_retry)
1840 return -EOPNOTSUPP;
1841
1842 return chip->setup_read_retry(mtd, retry_mode);
1843 }
1844
1845 /**
1846 * nand_do_read_ops - [INTERN] Read data with ECC
1847 * @mtd: MTD device structure
1848 * @from: offset to read from
1849 * @ops: oob ops structure
1850 *
1851 * Internal function. Called with chip held.
1852 */
1853 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1854 struct mtd_oob_ops *ops)
1855 {
1856 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1857 struct nand_chip *chip = mtd_to_nand(mtd);
1858 int ret = 0;
1859 uint32_t readlen = ops->len;
1860 uint32_t oobreadlen = ops->ooblen;
1861 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1862
1863 uint8_t *bufpoi, *oob, *buf;
1864 int use_bufpoi;
1865 unsigned int max_bitflips = 0;
1866 int retry_mode = 0;
1867 bool ecc_fail = false;
1868
1869 chipnr = (int)(from >> chip->chip_shift);
1870 chip->select_chip(mtd, chipnr);
1871
1872 realpage = (int)(from >> chip->page_shift);
1873 page = realpage & chip->pagemask;
1874
1875 col = (int)(from & (mtd->writesize - 1));
1876
1877 buf = ops->datbuf;
1878 oob = ops->oobbuf;
1879 oob_required = oob ? 1 : 0;
1880
1881 while (1) {
1882 unsigned int ecc_failures = mtd->ecc_stats.failed;
1883
1884 bytes = min(mtd->writesize - col, readlen);
1885 aligned = (bytes == mtd->writesize);
1886
1887 if (!aligned)
1888 use_bufpoi = 1;
1889 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1890 use_bufpoi = !virt_addr_valid(buf) ||
1891 !IS_ALIGNED((unsigned long)buf,
1892 chip->buf_align);
1893 else
1894 use_bufpoi = 0;
1895
1896 /* Is the current page in the buffer? */
1897 if (realpage != chip->pagebuf || oob) {
1898 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1899
1900 if (use_bufpoi && aligned)
1901 pr_debug("%s: using read bounce buffer for buf@%p\n",
1902 __func__, buf);
1903
1904 read_retry:
1905 if (nand_standard_page_accessors(&chip->ecc))
1906 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1907
1908 /*
1909 * Now read the page into the buffer. Absent an error,
1910 * the read methods return max bitflips per ecc step.
1911 */
1912 if (unlikely(ops->mode == MTD_OPS_RAW))
1913 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1914 oob_required,
1915 page);
1916 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1917 !oob)
1918 ret = chip->ecc.read_subpage(mtd, chip,
1919 col, bytes, bufpoi,
1920 page);
1921 else
1922 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1923 oob_required, page);
1924 if (ret < 0) {
1925 if (use_bufpoi)
1926 /* Invalidate page cache */
1927 chip->pagebuf = -1;
1928 break;
1929 }
1930
1931 			/* Transfer data from the bounce buffer, if used */
1932 if (use_bufpoi) {
1933 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
1934 !(mtd->ecc_stats.failed - ecc_failures) &&
1935 (ops->mode != MTD_OPS_RAW)) {
1936 chip->pagebuf = realpage;
1937 chip->pagebuf_bitflips = ret;
1938 } else {
1939 /* Invalidate page cache */
1940 chip->pagebuf = -1;
1941 }
1942 memcpy(buf, chip->buffers->databuf + col, bytes);
1943 }
1944
1945 if (unlikely(oob)) {
1946 int toread = min(oobreadlen, max_oobsize);
1947
1948 if (toread) {
1949 oob = nand_transfer_oob(mtd,
1950 oob, ops, toread);
1951 oobreadlen -= toread;
1952 }
1953 }
1954
1955 if (chip->options & NAND_NEED_READRDY) {
1956 /* Apply delay or wait for ready/busy pin */
1957 if (!chip->dev_ready)
1958 udelay(chip->chip_delay);
1959 else
1960 nand_wait_ready(mtd);
1961 }
1962
1963 if (mtd->ecc_stats.failed - ecc_failures) {
1964 if (retry_mode + 1 < chip->read_retries) {
1965 retry_mode++;
1966 ret = nand_setup_read_retry(mtd,
1967 retry_mode);
1968 if (ret < 0)
1969 break;
1970
1971 /* Reset failures; retry */
1972 mtd->ecc_stats.failed = ecc_failures;
1973 goto read_retry;
1974 } else {
1975 /* No more retry modes; real failure */
1976 ecc_fail = true;
1977 }
1978 }
1979
1980 buf += bytes;
1981 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1982 } else {
1983 memcpy(buf, chip->buffers->databuf + col, bytes);
1984 buf += bytes;
1985 max_bitflips = max_t(unsigned int, max_bitflips,
1986 chip->pagebuf_bitflips);
1987 }
1988
1989 readlen -= bytes;
1990
1991 /* Reset to retry mode 0 */
1992 if (retry_mode) {
1993 ret = nand_setup_read_retry(mtd, 0);
1994 if (ret < 0)
1995 break;
1996 retry_mode = 0;
1997 }
1998
1999 if (!readlen)
2000 break;
2001
2002 /* For subsequent reads align to page boundary */
2003 col = 0;
2004 /* Increment page address */
2005 realpage++;
2006
2007 page = realpage & chip->pagemask;
2008 /* Check, if we cross a chip boundary */
2009 if (!page) {
2010 chipnr++;
2011 chip->select_chip(mtd, -1);
2012 chip->select_chip(mtd, chipnr);
2013 }
2014 }
2015 chip->select_chip(mtd, -1);
2016
2017 ops->retlen = ops->len - (size_t) readlen;
2018 if (oob)
2019 ops->oobretlen = ops->ooblen - oobreadlen;
2020
2021 if (ret < 0)
2022 return ret;
2023
2024 if (ecc_fail)
2025 return -EBADMSG;
2026
2027 return max_bitflips;
2028 }
2029
2030 /**
2031 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2032 * @mtd: MTD device structure
2033 * @from: offset to read from
2034 * @len: number of bytes to read
2035 * @retlen: pointer to variable to store the number of read bytes
2036 * @buf: the databuffer to put data
2037 *
2038 * Get hold of the chip and call nand_do_read_ops().
2039 */
2040 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2041 size_t *retlen, uint8_t *buf)
2042 {
2043 struct mtd_oob_ops ops;
2044 int ret;
2045
2046 nand_get_device(mtd, FL_READING);
2047 memset(&ops, 0, sizeof(ops));
2048 ops.len = len;
2049 ops.datbuf = buf;
2050 ops.mode = MTD_OPS_PLACE_OOB;
2051 ret = nand_do_read_ops(mtd, from, &ops);
2052 *retlen = ops.retlen;
2053 nand_release_device(mtd);
2054 return ret;
2055 }
2056
2057 /**
2058 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2059 * @mtd: mtd info structure
2060 * @chip: nand chip info structure
2061 * @page: page number to read
2062 */
2063 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2064 {
2065 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2066 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2067 return 0;
2068 }
2069 EXPORT_SYMBOL(nand_read_oob_std);
2070
2071 /**
2072 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2073 * with syndromes
2074 * @mtd: mtd info structure
2075 * @chip: nand chip info structure
2076 * @page: page number to read
2077 */
2078 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2079 int page)
2080 {
2081 int length = mtd->oobsize;
2082 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2083 int eccsize = chip->ecc.size;
2084 uint8_t *bufpoi = chip->oob_poi;
2085 int i, toread, sndrnd = 0, pos;
2086
2087 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2088 for (i = 0; i < chip->ecc.steps; i++) {
2089 if (sndrnd) {
2090 pos = eccsize + i * (eccsize + chunk);
2091 if (mtd->writesize > 512)
2092 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2093 else
2094 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2095 } else
2096 sndrnd = 1;
2097 toread = min_t(int, length, chunk);
2098 chip->read_buf(mtd, bufpoi, toread);
2099 bufpoi += toread;
2100 length -= toread;
2101 }
2102 if (length > 0)
2103 chip->read_buf(mtd, bufpoi, length);
2104
2105 return 0;
2106 }
2107 EXPORT_SYMBOL(nand_read_oob_syndrome);
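
/*
 * A worked example of the syndrome layout handled above, with assumed
 * (not chip-specific) numbers: eccsize = 512 and chunk = prepad +
 * eccbytes + postpad = 16.  The OOB chunk of ECC step i then starts at
 * column eccsize + i * (eccsize + chunk):
 *
 *	step 0: column  512
 *	step 1: column 1040
 *	step 2: column 1568
 *	step 3: column 2096
 *
 * i.e. data and OOB chunks are interleaved on the flash instead of the
 * OOB being one contiguous area at the end of the page.
 */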
2108
2109 /**
2110 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2111 * @mtd: mtd info structure
2112 * @chip: nand chip info structure
2113 * @page: page number to write
2114 */
2115 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2116 {
2117 int status = 0;
2118 const uint8_t *buf = chip->oob_poi;
2119 int length = mtd->oobsize;
2120
2121 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2122 chip->write_buf(mtd, buf, length);
2123 /* Send command to program the OOB data */
2124 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2125
2126 status = chip->waitfunc(mtd, chip);
2127
2128 return status & NAND_STATUS_FAIL ? -EIO : 0;
2129 }
2130 EXPORT_SYMBOL(nand_write_oob_std);
2131
2132 /**
2133 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2134 * with syndrome - only for large page flash
2135 * @mtd: mtd info structure
2136 * @chip: nand chip info structure
2137 * @page: page number to write
2138 */
2139 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2140 int page)
2141 {
2142 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2143 int eccsize = chip->ecc.size, length = mtd->oobsize;
2144 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2145 const uint8_t *bufpoi = chip->oob_poi;
2146
2147 /*
2148 * data-ecc-data-ecc ... ecc-oob
2149 * or
2150 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2151 */
2152 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2153 pos = steps * (eccsize + chunk);
2154 steps = 0;
2155 } else
2156 pos = eccsize;
2157
2158 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2159 for (i = 0; i < steps; i++) {
2160 if (sndcmd) {
2161 if (mtd->writesize <= 512) {
2162 uint32_t fill = 0xFFFFFFFF;
2163
2164 len = eccsize;
2165 while (len > 0) {
2166 int num = min_t(int, len, 4);
2167 chip->write_buf(mtd, (uint8_t *)&fill,
2168 num);
2169 len -= num;
2170 }
2171 } else {
2172 pos = eccsize + i * (eccsize + chunk);
2173 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2174 }
2175 } else
2176 sndcmd = 1;
2177 len = min_t(int, length, chunk);
2178 chip->write_buf(mtd, bufpoi, len);
2179 bufpoi += len;
2180 length -= len;
2181 }
2182 if (length > 0)
2183 chip->write_buf(mtd, bufpoi, length);
2184
2185 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2186 status = chip->waitfunc(mtd, chip);
2187
2188 return status & NAND_STATUS_FAIL ? -EIO : 0;
2189 }
2190 EXPORT_SYMBOL(nand_write_oob_syndrome);
2191
2192 /**
2193 * nand_do_read_oob - [INTERN] NAND read out-of-band
2194 * @mtd: MTD device structure
2195 * @from: offset to read from
2196 * @ops: oob operations description structure
2197 *
2198 * NAND read out-of-band data from the spare area.
2199 */
2200 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2201 struct mtd_oob_ops *ops)
2202 {
2203 int page, realpage, chipnr;
2204 struct nand_chip *chip = mtd_to_nand(mtd);
2205 struct mtd_ecc_stats stats;
2206 int readlen = ops->ooblen;
2207 int len;
2208 uint8_t *buf = ops->oobbuf;
2209 int ret = 0;
2210
2211 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2212 __func__, (unsigned long long)from, readlen);
2213
2214 stats = mtd->ecc_stats;
2215
2216 len = mtd_oobavail(mtd, ops);
2217
2218 if (unlikely(ops->ooboffs >= len)) {
2219 pr_debug("%s: attempt to start read outside oob\n",
2220 __func__);
2221 return -EINVAL;
2222 }
2223
2224 /* Do not allow reads past end of device */
2225 if (unlikely(from >= mtd->size ||
2226 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2227 (from >> chip->page_shift)) * len)) {
2228 pr_debug("%s: attempt to read beyond end of device\n",
2229 __func__);
2230 return -EINVAL;
2231 }
2232
2233 chipnr = (int)(from >> chip->chip_shift);
2234 chip->select_chip(mtd, chipnr);
2235
2236 /* Shift to get page */
2237 realpage = (int)(from >> chip->page_shift);
2238 page = realpage & chip->pagemask;
2239
2240 while (1) {
2241 if (ops->mode == MTD_OPS_RAW)
2242 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2243 else
2244 ret = chip->ecc.read_oob(mtd, chip, page);
2245
2246 if (ret < 0)
2247 break;
2248
2249 len = min(len, readlen);
2250 buf = nand_transfer_oob(mtd, buf, ops, len);
2251
2252 if (chip->options & NAND_NEED_READRDY) {
2253 /* Apply delay or wait for ready/busy pin */
2254 if (!chip->dev_ready)
2255 udelay(chip->chip_delay);
2256 else
2257 nand_wait_ready(mtd);
2258 }
2259
2260 readlen -= len;
2261 if (!readlen)
2262 break;
2263
2264 /* Increment page address */
2265 realpage++;
2266
2267 page = realpage & chip->pagemask;
2268 /* Check, if we cross a chip boundary */
2269 if (!page) {
2270 chipnr++;
2271 chip->select_chip(mtd, -1);
2272 chip->select_chip(mtd, chipnr);
2273 }
2274 }
2275 chip->select_chip(mtd, -1);
2276
2277 ops->oobretlen = ops->ooblen - readlen;
2278
2279 if (ret < 0)
2280 return ret;
2281
2282 if (mtd->ecc_stats.failed - stats.failed)
2283 return -EBADMSG;
2284
2285 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2286 }
2287
2288 /**
2289 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2290 * @mtd: MTD device structure
2291 * @from: offset to read from
2292 * @ops: oob operation description structure
2293 *
2294 * NAND read data and/or out-of-band data.
2295 */
2296 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2297 struct mtd_oob_ops *ops)
2298 {
2299 int ret;
2300
2301 ops->retlen = 0;
2302
2303 /* Do not allow reads past end of device */
2304 if (ops->datbuf && (from + ops->len) > mtd->size) {
2305 pr_debug("%s: attempt to read beyond end of device\n",
2306 __func__);
2307 return -EINVAL;
2308 }
2309
2310 if (ops->mode != MTD_OPS_PLACE_OOB &&
2311 ops->mode != MTD_OPS_AUTO_OOB &&
2312 ops->mode != MTD_OPS_RAW)
2313 return -ENOTSUPP;
2314
2315 nand_get_device(mtd, FL_READING);
2316
2317 if (!ops->datbuf)
2318 ret = nand_do_read_oob(mtd, from, ops);
2319 else
2320 ret = nand_do_read_ops(mtd, from, ops);
2321
2322 nand_release_device(mtd);
2323 return ret;
2324 }
2325
2326
2327 /**
2328 * nand_write_page_raw - [INTERN] raw page write function
2329 * @mtd: mtd info structure
2330 * @chip: nand chip info structure
2331 * @buf: data buffer
2332 * @oob_required: must write chip->oob_poi to OOB
2333 * @page: page number to write
2334 *
2335 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2336 */
2337 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2338 const uint8_t *buf, int oob_required, int page)
2339 {
2340 chip->write_buf(mtd, buf, mtd->writesize);
2341 if (oob_required)
2342 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2343
2344 return 0;
2345 }
2346 EXPORT_SYMBOL(nand_write_page_raw);
2347
2348 /**
2349 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2350 * @mtd: mtd info structure
2351 * @chip: nand chip info structure
2352 * @buf: data buffer
2353 * @oob_required: must write chip->oob_poi to OOB
2354 * @page: page number to write
2355 *
2356 * We need a special oob layout and handling even when ECC isn't checked.
2357 */
2358 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2359 struct nand_chip *chip,
2360 const uint8_t *buf, int oob_required,
2361 int page)
2362 {
2363 int eccsize = chip->ecc.size;
2364 int eccbytes = chip->ecc.bytes;
2365 uint8_t *oob = chip->oob_poi;
2366 int steps, size;
2367
2368 for (steps = chip->ecc.steps; steps > 0; steps--) {
2369 chip->write_buf(mtd, buf, eccsize);
2370 buf += eccsize;
2371
2372 if (chip->ecc.prepad) {
2373 chip->write_buf(mtd, oob, chip->ecc.prepad);
2374 oob += chip->ecc.prepad;
2375 }
2376
2377 chip->write_buf(mtd, oob, eccbytes);
2378 oob += eccbytes;
2379
2380 if (chip->ecc.postpad) {
2381 chip->write_buf(mtd, oob, chip->ecc.postpad);
2382 oob += chip->ecc.postpad;
2383 }
2384 }
2385
2386 size = mtd->oobsize - (oob - chip->oob_poi);
2387 if (size)
2388 chip->write_buf(mtd, oob, size);
2389
2390 return 0;
2391 }
2392 /**
2393 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2394 * @mtd: mtd info structure
2395 * @chip: nand chip info structure
2396 * @buf: data buffer
2397 * @oob_required: must write chip->oob_poi to OOB
2398 * @page: page number to write
2399 */
2400 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2401 const uint8_t *buf, int oob_required,
2402 int page)
2403 {
2404 int i, eccsize = chip->ecc.size, ret;
2405 int eccbytes = chip->ecc.bytes;
2406 int eccsteps = chip->ecc.steps;
2407 uint8_t *ecc_calc = chip->buffers->ecccalc;
2408 const uint8_t *p = buf;
2409
2410 /* Software ECC calculation */
2411 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2412 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2413
2414 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2415 chip->ecc.total);
2416 if (ret)
2417 return ret;
2418
2419 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2420 }
2421
2422 /**
2423 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2424 * @mtd: mtd info structure
2425 * @chip: nand chip info structure
2426 * @buf: data buffer
2427 * @oob_required: must write chip->oob_poi to OOB
2428 * @page: page number to write
2429 */
2430 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2431 const uint8_t *buf, int oob_required,
2432 int page)
2433 {
2434 int i, eccsize = chip->ecc.size, ret;
2435 int eccbytes = chip->ecc.bytes;
2436 int eccsteps = chip->ecc.steps;
2437 uint8_t *ecc_calc = chip->buffers->ecccalc;
2438 const uint8_t *p = buf;
2439
2440 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2441 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2442 chip->write_buf(mtd, p, eccsize);
2443 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2444 }
2445
2446 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2447 chip->ecc.total);
2448 if (ret)
2449 return ret;
2450
2451 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2452
2453 return 0;
2454 }
2455
2456
2457 /**
2458 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2459 * @mtd: mtd info structure
2460 * @chip: nand chip info structure
2461 * @offset: column address of subpage within the page
2462 * @data_len: data length
2463 * @buf: data buffer
2464 * @oob_required: must write chip->oob_poi to OOB
2465 * @page: page number to write
2466 */
2467 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2468 struct nand_chip *chip, uint32_t offset,
2469 uint32_t data_len, const uint8_t *buf,
2470 int oob_required, int page)
2471 {
2472 uint8_t *oob_buf = chip->oob_poi;
2473 uint8_t *ecc_calc = chip->buffers->ecccalc;
2474 int ecc_size = chip->ecc.size;
2475 int ecc_bytes = chip->ecc.bytes;
2476 int ecc_steps = chip->ecc.steps;
2477 uint32_t start_step = offset / ecc_size;
2478 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2479 int oob_bytes = mtd->oobsize / ecc_steps;
2480 int step, ret;
2481
2482 for (step = 0; step < ecc_steps; step++) {
2483 /* configure controller for WRITE access */
2484 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2485
2486 /* write data (untouched subpages already masked by 0xFF) */
2487 chip->write_buf(mtd, buf, ecc_size);
2488
2489 /* mask ECC of un-touched subpages by padding 0xFF */
2490 if ((step < start_step) || (step > end_step))
2491 memset(ecc_calc, 0xff, ecc_bytes);
2492 else
2493 chip->ecc.calculate(mtd, buf, ecc_calc);
2494
2495 /* mask OOB of un-touched subpages by padding 0xFF */
2496 /* if oob_required, preserve OOB metadata of written subpage */
2497 if (!oob_required || (step < start_step) || (step > end_step))
2498 memset(oob_buf, 0xff, oob_bytes);
2499
2500 buf += ecc_size;
2501 ecc_calc += ecc_bytes;
2502 oob_buf += oob_bytes;
2503 }
2504
2505 	/* copy calculated ECC for the whole page to chip->oob_poi */
2506 	/* this includes the masked value (0xFF) for unwritten subpages */
2507 ecc_calc = chip->buffers->ecccalc;
2508 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2509 chip->ecc.total);
2510 if (ret)
2511 return ret;
2512
2513 /* write OOB buffer to NAND device */
2514 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2515
2516 return 0;
2517 }
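
/*
 * A worked example of the subpage masking above, assuming a 2048 byte
 * page with ecc.size = 512 (four ECC steps): writing data_len = 1024
 * bytes at offset = 512 gives start_step = 1 and end_step = 2, so the
 * ECC and OOB of steps 0 and 3 are padded with 0xff (leaving those
 * subpages untouched on flash) while steps 1 and 2 get real ECC.
 */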
2518
2519
2520 /**
2521 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2522 * @mtd: mtd info structure
2523 * @chip: nand chip info structure
2524 * @buf: data buffer
2525 * @oob_required: must write chip->oob_poi to OOB
2526 * @page: page number to write
2527 *
2528 * The hw generator calculates the error syndrome automatically. Therefore we
2529 * need a special oob layout and handling.
2530 */
2531 static int nand_write_page_syndrome(struct mtd_info *mtd,
2532 struct nand_chip *chip,
2533 const uint8_t *buf, int oob_required,
2534 int page)
2535 {
2536 int i, eccsize = chip->ecc.size;
2537 int eccbytes = chip->ecc.bytes;
2538 int eccsteps = chip->ecc.steps;
2539 const uint8_t *p = buf;
2540 uint8_t *oob = chip->oob_poi;
2541
2542 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2543
2544 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2545 chip->write_buf(mtd, p, eccsize);
2546
2547 if (chip->ecc.prepad) {
2548 chip->write_buf(mtd, oob, chip->ecc.prepad);
2549 oob += chip->ecc.prepad;
2550 }
2551
2552 chip->ecc.calculate(mtd, p, oob);
2553 chip->write_buf(mtd, oob, eccbytes);
2554 oob += eccbytes;
2555
2556 if (chip->ecc.postpad) {
2557 chip->write_buf(mtd, oob, chip->ecc.postpad);
2558 oob += chip->ecc.postpad;
2559 }
2560 }
2561
2562 /* Calculate remaining oob bytes */
2563 i = mtd->oobsize - (oob - chip->oob_poi);
2564 if (i)
2565 chip->write_buf(mtd, oob, i);
2566
2567 return 0;
2568 }
2569
2570 /**
2571 * nand_write_page - write one page
2572 * @mtd: MTD device structure
2573 * @chip: NAND chip descriptor
2574 * @offset: address offset within the page
2575 * @data_len: length of actual data to be written
2576 * @buf: the data to write
2577 * @oob_required: must write chip->oob_poi to OOB
2578 * @page: page number to write
2579 * @raw: use _raw version of write_page
2580 */
2581 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2582 uint32_t offset, int data_len, const uint8_t *buf,
2583 int oob_required, int page, int raw)
2584 {
2585 int status, subpage;
2586
2587 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2588 chip->ecc.write_subpage)
2589 subpage = offset || (data_len < mtd->writesize);
2590 else
2591 subpage = 0;
2592
2593 if (nand_standard_page_accessors(&chip->ecc))
2594 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2595
2596 if (unlikely(raw))
2597 status = chip->ecc.write_page_raw(mtd, chip, buf,
2598 oob_required, page);
2599 else if (subpage)
2600 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2601 buf, oob_required, page);
2602 else
2603 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2604 page);
2605
2606 if (status < 0)
2607 return status;
2608
2609 if (nand_standard_page_accessors(&chip->ecc)) {
2610 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2611
2612 status = chip->waitfunc(mtd, chip);
2613 if (status & NAND_STATUS_FAIL)
2614 return -EIO;
2615 }
2616
2617 return 0;
2618 }
2619
2620 /**
2621 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2622 * @mtd: MTD device structure
2623 * @oob: oob data buffer
2624 * @len: oob data write length
2625 * @ops: oob ops structure
2626 */
2627 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2628 struct mtd_oob_ops *ops)
2629 {
2630 struct nand_chip *chip = mtd_to_nand(mtd);
2631 int ret;
2632
2633 /*
2634 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2635 * data from a previous OOB read.
2636 */
2637 memset(chip->oob_poi, 0xff, mtd->oobsize);
2638
2639 switch (ops->mode) {
2640
2641 case MTD_OPS_PLACE_OOB:
2642 case MTD_OPS_RAW:
2643 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2644 return oob + len;
2645
2646 case MTD_OPS_AUTO_OOB:
2647 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2648 ops->ooboffs, len);
2649 BUG_ON(ret);
2650 return oob + len;
2651
2652 default:
2653 BUG();
2654 }
2655 return NULL;
2656 }
2657
2658 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2659
2660 /**
2661 * nand_do_write_ops - [INTERN] NAND write with ECC
2662 * @mtd: MTD device structure
2663 * @to: offset to write to
2664 * @ops: oob operations description structure
2665 *
2666 * NAND write with ECC.
2667 */
2668 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2669 struct mtd_oob_ops *ops)
2670 {
2671 int chipnr, realpage, page, column;
2672 struct nand_chip *chip = mtd_to_nand(mtd);
2673 uint32_t writelen = ops->len;
2674
2675 uint32_t oobwritelen = ops->ooblen;
2676 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2677
2678 uint8_t *oob = ops->oobbuf;
2679 uint8_t *buf = ops->datbuf;
2680 int ret;
2681 int oob_required = oob ? 1 : 0;
2682
2683 ops->retlen = 0;
2684 if (!writelen)
2685 return 0;
2686
2687 	/* Reject writes which are not page aligned */
2688 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2689 pr_notice("%s: attempt to write non page aligned data\n",
2690 __func__);
2691 return -EINVAL;
2692 }
2693
2694 column = to & (mtd->writesize - 1);
2695
2696 chipnr = (int)(to >> chip->chip_shift);
2697 chip->select_chip(mtd, chipnr);
2698
2699 /* Check, if it is write protected */
2700 if (nand_check_wp(mtd)) {
2701 ret = -EIO;
2702 goto err_out;
2703 }
2704
2705 realpage = (int)(to >> chip->page_shift);
2706 page = realpage & chip->pagemask;
2707
2708 /* Invalidate the page cache, when we write to the cached page */
2709 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2710 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2711 chip->pagebuf = -1;
2712
2713 /* Don't allow multipage oob writes with offset */
2714 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2715 ret = -EINVAL;
2716 goto err_out;
2717 }
2718
2719 while (1) {
2720 int bytes = mtd->writesize;
2721 uint8_t *wbuf = buf;
2722 int use_bufpoi;
2723 int part_pagewr = (column || writelen < mtd->writesize);
2724
2725 if (part_pagewr)
2726 use_bufpoi = 1;
2727 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2728 use_bufpoi = !virt_addr_valid(buf) ||
2729 !IS_ALIGNED((unsigned long)buf,
2730 chip->buf_align);
2731 else
2732 use_bufpoi = 0;
2733
2734 		/* Partial page write, or need to use the bounce buffer? */
2735 if (use_bufpoi) {
2736 pr_debug("%s: using write bounce buffer for buf@%p\n",
2737 __func__, buf);
2738 if (part_pagewr)
2739 bytes = min_t(int, bytes - column, writelen);
2740 chip->pagebuf = -1;
2741 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2742 memcpy(&chip->buffers->databuf[column], buf, bytes);
2743 wbuf = chip->buffers->databuf;
2744 }
2745
2746 if (unlikely(oob)) {
2747 size_t len = min(oobwritelen, oobmaxlen);
2748 oob = nand_fill_oob(mtd, oob, len, ops);
2749 oobwritelen -= len;
2750 } else {
2751 /* We still need to erase leftover OOB data */
2752 memset(chip->oob_poi, 0xff, mtd->oobsize);
2753 }
2754
2755 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2756 oob_required, page,
2757 (ops->mode == MTD_OPS_RAW));
2758 if (ret)
2759 break;
2760
2761 writelen -= bytes;
2762 if (!writelen)
2763 break;
2764
2765 column = 0;
2766 buf += bytes;
2767 realpage++;
2768
2769 page = realpage & chip->pagemask;
2770 /* Check, if we cross a chip boundary */
2771 if (!page) {
2772 chipnr++;
2773 chip->select_chip(mtd, -1);
2774 chip->select_chip(mtd, chipnr);
2775 }
2776 }
2777
2778 ops->retlen = ops->len - writelen;
2779 if (unlikely(oob))
2780 ops->oobretlen = ops->ooblen;
2781
2782 err_out:
2783 chip->select_chip(mtd, -1);
2784 return ret;
2785 }
2786
2787 /**
2788 * panic_nand_write - [MTD Interface] NAND write with ECC
2789 * @mtd: MTD device structure
2790 * @to: offset to write to
2791 * @len: number of bytes to write
2792 * @retlen: pointer to variable to store the number of written bytes
2793 * @buf: the data to write
2794 *
2795 * NAND write with ECC. Used when performing writes in interrupt context, this
2796 * may for example be called by mtdoops when writing an oops while in panic.
2797 */
2798 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2799 size_t *retlen, const uint8_t *buf)
2800 {
2801 struct nand_chip *chip = mtd_to_nand(mtd);
2802 struct mtd_oob_ops ops;
2803 int ret;
2804
2805 /* Wait for the device to get ready */
2806 panic_nand_wait(mtd, chip, 400);
2807
2808 /* Grab the device */
2809 panic_nand_get_device(chip, mtd, FL_WRITING);
2810
2811 memset(&ops, 0, sizeof(ops));
2812 ops.len = len;
2813 ops.datbuf = (uint8_t *)buf;
2814 ops.mode = MTD_OPS_PLACE_OOB;
2815
2816 ret = nand_do_write_ops(mtd, to, &ops);
2817
2818 *retlen = ops.retlen;
2819 return ret;
2820 }
2821
2822 /**
2823 * nand_write - [MTD Interface] NAND write with ECC
2824 * @mtd: MTD device structure
2825 * @to: offset to write to
2826 * @len: number of bytes to write
2827 * @retlen: pointer to variable to store the number of written bytes
2828 * @buf: the data to write
2829 *
2830 * NAND write with ECC.
2831 */
2832 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2833 size_t *retlen, const uint8_t *buf)
2834 {
2835 struct mtd_oob_ops ops;
2836 int ret;
2837
2838 nand_get_device(mtd, FL_WRITING);
2839 memset(&ops, 0, sizeof(ops));
2840 ops.len = len;
2841 ops.datbuf = (uint8_t *)buf;
2842 ops.mode = MTD_OPS_PLACE_OOB;
2843 ret = nand_do_write_ops(mtd, to, &ops);
2844 *retlen = ops.retlen;
2845 nand_release_device(mtd);
2846 return ret;
2847 }
2848
2849 /**
2850 * nand_do_write_oob - [INTERN] NAND write out-of-band
2851 * @mtd: MTD device structure
2852 * @to: offset to write to
2853 * @ops: oob operation description structure
2854 *
2855 * NAND write out-of-band.
2856 */
2857 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2858 struct mtd_oob_ops *ops)
2859 {
2860 int chipnr, page, status, len;
2861 struct nand_chip *chip = mtd_to_nand(mtd);
2862
2863 pr_debug("%s: to = 0x%08x, len = %i\n",
2864 __func__, (unsigned int)to, (int)ops->ooblen);
2865
2866 len = mtd_oobavail(mtd, ops);
2867
2868 /* Do not allow write past end of page */
2869 if ((ops->ooboffs + ops->ooblen) > len) {
2870 pr_debug("%s: attempt to write past end of page\n",
2871 __func__);
2872 return -EINVAL;
2873 }
2874
2875 if (unlikely(ops->ooboffs >= len)) {
2876 pr_debug("%s: attempt to start write outside oob\n",
2877 __func__);
2878 return -EINVAL;
2879 }
2880
2881 /* Do not allow write past end of device */
2882 if (unlikely(to >= mtd->size ||
2883 ops->ooboffs + ops->ooblen >
2884 ((mtd->size >> chip->page_shift) -
2885 (to >> chip->page_shift)) * len)) {
2886 pr_debug("%s: attempt to write beyond end of device\n",
2887 __func__);
2888 return -EINVAL;
2889 }
2890
2891 chipnr = (int)(to >> chip->chip_shift);
2892
2893 /*
2894 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2895 * of my DiskOnChip 2000 test units) will clear the whole data page too
2896 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2897 * it in the doc2000 driver in August 1999. dwmw2.
2898 */
2899 nand_reset(chip, chipnr);
2900
2901 chip->select_chip(mtd, chipnr);
2902
2903 /* Shift to get page */
2904 page = (int)(to >> chip->page_shift);
2905
2906 /* Check, if it is write protected */
2907 if (nand_check_wp(mtd)) {
2908 chip->select_chip(mtd, -1);
2909 return -EROFS;
2910 }
2911
2912 /* Invalidate the page cache, if we write to the cached page */
2913 if (page == chip->pagebuf)
2914 chip->pagebuf = -1;
2915
2916 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
2917
2918 if (ops->mode == MTD_OPS_RAW)
2919 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
2920 else
2921 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
2922
2923 chip->select_chip(mtd, -1);
2924
2925 if (status)
2926 return status;
2927
2928 ops->oobretlen = ops->ooblen;
2929
2930 return 0;
2931 }
2932
2933 /**
2934 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2935 * @mtd: MTD device structure
2936 * @to: offset to write to
2937 * @ops: oob operation description structure
2938 */
2939 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
2940 struct mtd_oob_ops *ops)
2941 {
2942 int ret = -ENOTSUPP;
2943
2944 ops->retlen = 0;
2945
2946 /* Do not allow writes past end of device */
2947 if (ops->datbuf && (to + ops->len) > mtd->size) {
2948 pr_debug("%s: attempt to write beyond end of device\n",
2949 __func__);
2950 return -EINVAL;
2951 }
2952
2953 nand_get_device(mtd, FL_WRITING);
2954
2955 switch (ops->mode) {
2956 case MTD_OPS_PLACE_OOB:
2957 case MTD_OPS_AUTO_OOB:
2958 case MTD_OPS_RAW:
2959 break;
2960
2961 default:
2962 goto out;
2963 }
2964
2965 if (!ops->datbuf)
2966 ret = nand_do_write_oob(mtd, to, ops);
2967 else
2968 ret = nand_do_write_ops(mtd, to, ops);
2969
2970 out:
2971 nand_release_device(mtd);
2972 return ret;
2973 }
2974
2975 /**
2976 * single_erase - [GENERIC] NAND standard block erase command function
2977 * @mtd: MTD device structure
2978 * @page: the page address of the block which will be erased
2979 *
2980 * Standard erase command for NAND chips. Returns NAND status.
2981 */
2982 static int single_erase(struct mtd_info *mtd, int page)
2983 {
2984 struct nand_chip *chip = mtd_to_nand(mtd);
2985 /* Send commands to erase a block */
2986 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2987 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2988
2989 return chip->waitfunc(mtd, chip);
2990 }
2991
2992 /**
2993 * nand_erase - [MTD Interface] erase block(s)
2994 * @mtd: MTD device structure
2995 * @instr: erase instruction
2996 *
2997 * Erase one or more blocks.
2998 */
2999 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3000 {
3001 return nand_erase_nand(mtd, instr, 0);
3002 }
3003
3004 /**
3005 * nand_erase_nand - [INTERN] erase block(s)
3006 * @mtd: MTD device structure
3007 * @instr: erase instruction
3008 * @allowbbt: allow erasing the bbt area
3009 *
3010 * Erase one or more blocks.
3011 */
3012 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3013 int allowbbt)
3014 {
3015 int page, status, pages_per_block, ret, chipnr;
3016 struct nand_chip *chip = mtd_to_nand(mtd);
3017 loff_t len;
3018
3019 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3020 __func__, (unsigned long long)instr->addr,
3021 (unsigned long long)instr->len);
3022
3023 if (check_offs_len(mtd, instr->addr, instr->len))
3024 return -EINVAL;
3025
3026 /* Grab the lock and see if the device is available */
3027 nand_get_device(mtd, FL_ERASING);
3028
3029 /* Shift to get first page */
3030 page = (int)(instr->addr >> chip->page_shift);
3031 chipnr = (int)(instr->addr >> chip->chip_shift);
3032
3033 /* Calculate pages in each block */
3034 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3035
3036 /* Select the NAND device */
3037 chip->select_chip(mtd, chipnr);
3038
3039 /* Check, if it is write protected */
3040 if (nand_check_wp(mtd)) {
3041 pr_debug("%s: device is write protected!\n",
3042 __func__);
3043 instr->state = MTD_ERASE_FAILED;
3044 goto erase_exit;
3045 }
3046
3047 /* Loop through the pages */
3048 len = instr->len;
3049
3050 instr->state = MTD_ERASING;
3051
3052 while (len) {
3053 		/* Check if we have a bad block; we do not erase bad blocks! */
3054 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3055 chip->page_shift, allowbbt)) {
3056 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3057 __func__, page);
3058 instr->state = MTD_ERASE_FAILED;
3059 goto erase_exit;
3060 }
3061
3062 /*
3063 * Invalidate the page cache, if we erase the block which
3064 * contains the current cached page.
3065 */
3066 if (page <= chip->pagebuf && chip->pagebuf <
3067 (page + pages_per_block))
3068 chip->pagebuf = -1;
3069
3070 status = chip->erase(mtd, page & chip->pagemask);
3071
3072 /* See if block erase succeeded */
3073 if (status & NAND_STATUS_FAIL) {
3074 pr_debug("%s: failed erase, page 0x%08x\n",
3075 __func__, page);
3076 instr->state = MTD_ERASE_FAILED;
3077 instr->fail_addr =
3078 ((loff_t)page << chip->page_shift);
3079 goto erase_exit;
3080 }
3081
3082 /* Increment page address and decrement length */
3083 len -= (1ULL << chip->phys_erase_shift);
3084 page += pages_per_block;
3085
3086 /* Check, if we cross a chip boundary */
3087 if (len && !(page & chip->pagemask)) {
3088 chipnr++;
3089 chip->select_chip(mtd, -1);
3090 chip->select_chip(mtd, chipnr);
3091 }
3092 }
3093 instr->state = MTD_ERASE_DONE;
3094
3095 erase_exit:
3096
3097 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3098
3099 /* Deselect and wake up anyone waiting on the device */
3100 chip->select_chip(mtd, -1);
3101 nand_release_device(mtd);
3102
3103 	/* Call the erase callback */
3104 if (!ret)
3105 mtd_erase_callback(instr);
3106
3107 /* Return more or less happy */
3108 return ret;
3109 }
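
/*
 * An illustrative MTD-level erase call that ends up here (the offset and
 * length are examples only and must be erase-block aligned):
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *
 *	ret = mtd_erase(mtd, &ei);
 */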
3110
3111 /**
3112 * nand_sync - [MTD Interface] sync
3113 * @mtd: MTD device structure
3114 *
3115 * Sync is actually a wait for chip ready function.
3116 */
3117 static void nand_sync(struct mtd_info *mtd)
3118 {
3119 pr_debug("%s: called\n", __func__);
3120
3121 /* Grab the lock and see if the device is available */
3122 nand_get_device(mtd, FL_SYNCING);
3123 /* Release it and go back */
3124 nand_release_device(mtd);
3125 }
3126
3127 /**
3128 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3129 * @mtd: MTD device structure
3130 * @offs: offset relative to mtd start
3131 */
3132 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3133 {
3134 struct nand_chip *chip = mtd_to_nand(mtd);
3135 int chipnr = (int)(offs >> chip->chip_shift);
3136 int ret;
3137
3138 /* Select the NAND device */
3139 nand_get_device(mtd, FL_READING);
3140 chip->select_chip(mtd, chipnr);
3141
3142 ret = nand_block_checkbad(mtd, offs, 0);
3143
3144 chip->select_chip(mtd, -1);
3145 nand_release_device(mtd);
3146
3147 return ret;
3148 }
3149
3150 /**
3151 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3152 * @mtd: MTD device structure
3153 * @ofs: offset relative to mtd start
3154 */
3155 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3156 {
3157 int ret;
3158
3159 ret = nand_block_isbad(mtd, ofs);
3160 if (ret) {
3161 /* If it was bad already, return success and do nothing */
3162 if (ret > 0)
3163 return 0;
3164 return ret;
3165 }
3166
3167 return nand_block_markbad_lowlevel(mtd, ofs);
3168 }
3169
3170 /**
3171 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3172 * @mtd: MTD device structure
3173 * @ofs: offset relative to mtd start
3174 * @len: length of mtd
3175 */
3176 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3177 {
3178 struct nand_chip *chip = mtd_to_nand(mtd);
3179 u32 part_start_block;
3180 u32 part_end_block;
3181 u32 part_start_die;
3182 u32 part_end_die;
3183
3184 /*
3185 	 * max_bb_per_die and blocks_per_die are used to determine
3186 * the maximum bad block count.
3187 */
3188 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3189 return -ENOTSUPP;
3190
3191 /* Get the start and end of the partition in erase blocks. */
3192 part_start_block = mtd_div_by_eb(ofs, mtd);
3193 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3194
3195 /* Get the start and end LUNs of the partition. */
3196 part_start_die = part_start_block / chip->blocks_per_die;
3197 part_end_die = part_end_block / chip->blocks_per_die;
3198
3199 /*
3200 * Look up the bad blocks per unit and multiply by the number of units
3201 * that the partition spans.
3202 */
3203 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3204 }
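
/*
 * A worked example with assumed geometry: if blocks_per_die = 1024 and
 * max_bb_per_die = 20, a partition covering blocks 1000..1100 starts in
 * die 0 and ends in die 1, so the worst case reported here is
 * 20 * (1 - 0 + 1) = 40 bad blocks.
 */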
3205
3206 /**
3207 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3208 * @mtd: MTD device structure
3209 * @chip: nand chip info structure
3210 * @addr: feature address.
3211 * @subfeature_param: the subfeature parameters, a four bytes array.
3212 */
3213 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3214 int addr, uint8_t *subfeature_param)
3215 {
3216 int status;
3217 int i;
3218
3219 if (!chip->onfi_version ||
3220 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3221 & ONFI_OPT_CMD_SET_GET_FEATURES))
3222 return -EINVAL;
3223
3224 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3225 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3226 chip->write_byte(mtd, subfeature_param[i]);
3227
3228 status = chip->waitfunc(mtd, chip);
3229 if (status & NAND_STATUS_FAIL)
3230 return -EIO;
3231 return 0;
3232 }
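
/*
 * Illustrative caller sketch (assumes the chip advertises the SET/GET
 * FEATURES commands; the timing-mode feature address is just an example):
 *
 *	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { timing_mode, 0, 0, 0 };
 *
 *	ret = chip->onfi_set_features(mtd, chip,
 *				      ONFI_FEATURE_ADDR_TIMING_MODE,
 *				      feature);
 */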
3233
3234 /**
3235 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3236 * @mtd: MTD device structure
3237 * @chip: nand chip info structure
3238 * @addr: feature address.
3239 * @subfeature_param: the subfeature parameters, a four bytes array.
3240 */
3241 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3242 int addr, uint8_t *subfeature_param)
3243 {
3244 int i;
3245
3246 if (!chip->onfi_version ||
3247 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3248 & ONFI_OPT_CMD_SET_GET_FEATURES))
3249 return -EINVAL;
3250
3251 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3252 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3253 *subfeature_param++ = chip->read_byte(mtd);
3254 return 0;
3255 }
3256
3257 /**
3258 * nand_onfi_get_set_features_notsupp - set/get features stub returning
3259 * -ENOTSUPP
3260 * @mtd: MTD device structure
3261 * @chip: nand chip info structure
3262 * @addr: feature address.
3263 * @subfeature_param: the subfeature parameters, a four bytes array.
3264 *
3265 * Should be used by NAND controller drivers that do not support the SET/GET
3266 * FEATURES operations.
3267 */
3268 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
3269 struct nand_chip *chip, int addr,
3270 u8 *subfeature_param)
3271 {
3272 return -ENOTSUPP;
3273 }
3274 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
3275
3276 /**
3277 * nand_suspend - [MTD Interface] Suspend the NAND flash
3278 * @mtd: MTD device structure
3279 */
3280 static int nand_suspend(struct mtd_info *mtd)
3281 {
3282 return nand_get_device(mtd, FL_PM_SUSPENDED);
3283 }
3284
3285 /**
3286 * nand_resume - [MTD Interface] Resume the NAND flash
3287 * @mtd: MTD device structure
3288 */
3289 static void nand_resume(struct mtd_info *mtd)
3290 {
3291 struct nand_chip *chip = mtd_to_nand(mtd);
3292
3293 if (chip->state == FL_PM_SUSPENDED)
3294 nand_release_device(mtd);
3295 else
3296 pr_err("%s called for a chip which is not in suspended state\n",
3297 __func__);
3298 }
3299
3300 /**
3301 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3302 * prevent further operations
3303 * @mtd: MTD device structure
3304 */
3305 static void nand_shutdown(struct mtd_info *mtd)
3306 {
3307 nand_get_device(mtd, FL_PM_SUSPENDED);
3308 }
3309
3310 /* Set default functions */
3311 static void nand_set_defaults(struct nand_chip *chip)
3312 {
3313 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3314
3315 /* check for proper chip_delay setup, set 20us if not */
3316 if (!chip->chip_delay)
3317 chip->chip_delay = 20;
3318
3319 	/* check if a user-supplied command function was given */
3320 if (chip->cmdfunc == NULL)
3321 chip->cmdfunc = nand_command;
3322
3323 	/* check if a user-supplied wait function was given */
3324 if (chip->waitfunc == NULL)
3325 chip->waitfunc = nand_wait;
3326
3327 if (!chip->select_chip)
3328 chip->select_chip = nand_select_chip;
3329
3330 /* set for ONFI nand */
3331 if (!chip->onfi_set_features)
3332 chip->onfi_set_features = nand_onfi_set_features;
3333 if (!chip->onfi_get_features)
3334 chip->onfi_get_features = nand_onfi_get_features;
3335
3336 /* If called twice, pointers that depend on busw may need to be reset */
3337 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3338 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3339 if (!chip->read_word)
3340 chip->read_word = nand_read_word;
3341 if (!chip->block_bad)
3342 chip->block_bad = nand_block_bad;
3343 if (!chip->block_markbad)
3344 chip->block_markbad = nand_default_block_markbad;
3345 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3346 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3347 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3348 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3349 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3350 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3351 if (!chip->scan_bbt)
3352 chip->scan_bbt = nand_default_bbt;
3353
3354 if (!chip->controller) {
3355 chip->controller = &chip->hwcontrol;
3356 nand_hw_control_init(chip->controller);
3357 }
3358
3359 if (!chip->buf_align)
3360 chip->buf_align = 1;
3361 }
3362
3363 /* Sanitize ONFI strings so we can safely print them */
3364 static void sanitize_string(uint8_t *s, size_t len)
3365 {
3366 ssize_t i;
3367
3368 /* Null terminate */
3369 s[len - 1] = 0;
3370
3371 /* Remove non printable chars */
3372 for (i = 0; i < len - 1; i++) {
3373 if (s[i] < ' ' || s[i] > 127)
3374 s[i] = '?';
3375 }
3376
3377 /* Remove trailing spaces */
3378 strim(s);
3379 }
3380
3381 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3382 {
3383 int i;
3384 while (len--) {
3385 crc ^= *p++ << 8;
3386 for (i = 0; i < 8; i++)
3387 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3388 }
3389
3390 return crc;
3391 }
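
/*
 * The ONFI CRC covers bytes 0..253 of each 256 byte parameter page copy
 * and is seeded with ONFI_CRC_BASE, e.g. (mirroring the checks below):
 *
 *	crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254);
 *	if (crc != le16_to_cpu(p->crc))
 *		the parameter page copy is corrupt; try the next copy
 */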
3392
3393 /* Parse the Extended Parameter Page. */
3394 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3395 struct nand_onfi_params *p)
3396 {
3397 struct mtd_info *mtd = nand_to_mtd(chip);
3398 struct onfi_ext_param_page *ep;
3399 struct onfi_ext_section *s;
3400 struct onfi_ext_ecc_info *ecc;
3401 uint8_t *cursor;
3402 int ret = -EINVAL;
3403 int len;
3404 int i;
3405
3406 len = le16_to_cpu(p->ext_param_page_length) * 16;
3407 ep = kmalloc(len, GFP_KERNEL);
3408 if (!ep)
3409 return -ENOMEM;
3410
3411 /* Send our own NAND_CMD_PARAM. */
3412 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3413
3414 /* Use the Change Read Column command to skip the ONFI param pages. */
3415 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3416 			sizeof(*p) * p->num_of_param_pages, -1);
3417
3418 /* Read out the Extended Parameter Page. */
3419 chip->read_buf(mtd, (uint8_t *)ep, len);
3420 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3421 != le16_to_cpu(ep->crc))) {
3422 pr_debug("fail in the CRC.\n");
3423 goto ext_out;
3424 }
3425
3426 /*
3427 * Check the signature.
3428 	 * We do not strictly follow the ONFI spec here; this may change in the future.
3429 */
3430 if (strncmp(ep->sig, "EPPS", 4)) {
3431 pr_debug("The signature is invalid.\n");
3432 goto ext_out;
3433 }
3434
3435 /* find the ECC section. */
3436 cursor = (uint8_t *)(ep + 1);
3437 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3438 s = ep->sections + i;
3439 if (s->type == ONFI_SECTION_TYPE_2)
3440 break;
3441 cursor += s->length * 16;
3442 }
3443 if (i == ONFI_EXT_SECTION_MAX) {
3444 pr_debug("We can not find the ECC section.\n");
3445 goto ext_out;
3446 }
3447
3448 /* get the info we want. */
3449 ecc = (struct onfi_ext_ecc_info *)cursor;
3450
3451 if (!ecc->codeword_size) {
3452 pr_debug("Invalid codeword size\n");
3453 goto ext_out;
3454 }
3455
3456 chip->ecc_strength_ds = ecc->ecc_bits;
3457 chip->ecc_step_ds = 1 << ecc->codeword_size;
3458 ret = 0;
3459
3460 ext_out:
3461 kfree(ep);
3462 return ret;
3463 }
3464
3465 /*
3466 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3467 */
3468 static int nand_flash_detect_onfi(struct nand_chip *chip)
3469 {
3470 struct mtd_info *mtd = nand_to_mtd(chip);
3471 struct nand_onfi_params *p = &chip->onfi_params;
3472 int i, j;
3473 int val;
3474
3475 /* Try ONFI for unknown chip or LP */
3476 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3477 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3478 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3479 return 0;
3480
3481 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3482 for (i = 0; i < 3; i++) {
3483 for (j = 0; j < sizeof(*p); j++)
3484 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3485 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3486 le16_to_cpu(p->crc)) {
3487 break;
3488 }
3489 }
3490
3491 if (i == 3) {
3492 pr_err("Could not find valid ONFI parameter page; aborting\n");
3493 return 0;
3494 }
3495
3496 /* Check version */
3497 val = le16_to_cpu(p->revision);
3498 if (val & (1 << 5))
3499 chip->onfi_version = 23;
3500 else if (val & (1 << 4))
3501 chip->onfi_version = 22;
3502 else if (val & (1 << 3))
3503 chip->onfi_version = 21;
3504 else if (val & (1 << 2))
3505 chip->onfi_version = 20;
3506 else if (val & (1 << 1))
3507 chip->onfi_version = 10;
3508
3509 if (!chip->onfi_version) {
3510 pr_info("unsupported ONFI version: %d\n", val);
3511 return 0;
3512 }
3513
3514 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3515 sanitize_string(p->model, sizeof(p->model));
3516 if (!mtd->name)
3517 mtd->name = p->model;
3518
3519 mtd->writesize = le32_to_cpu(p->byte_per_page);
3520
3521 /*
3522 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3523 * (don't ask me who thought of this...). MTD assumes that these
3524 * dimensions will be power-of-2, so just truncate the remaining area.
3525 */
3526 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3527 mtd->erasesize *= mtd->writesize;
3528
3529 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3530
3531 /* See erasesize comment */
3532 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3533 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3534 chip->bits_per_cell = p->bits_per_cell;
3535
3536 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3537 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3538
3539 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3540 chip->options |= NAND_BUSWIDTH_16;
3541
3542 if (p->ecc_bits != 0xff) {
3543 chip->ecc_strength_ds = p->ecc_bits;
3544 chip->ecc_step_ds = 512;
3545 } else if (chip->onfi_version >= 21 &&
3546 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3547
3548 /*
3549 		 * nand_flash_detect_ext_param_page() uses the Change Read
3550 		 * Column command, which may not be supported by the current
3551 		 * chip->cmdfunc, so try to update chip->cmdfunc now. We do
3552 		 * not replace a user-supplied command function.
3553 */
3554 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3555 chip->cmdfunc = nand_command_lp;
3556
3557 /* The Extended Parameter Page is supported since ONFI 2.1. */
3558 if (nand_flash_detect_ext_param_page(chip, p))
3559 pr_warn("Failed to detect ONFI extended param page\n");
3560 } else {
3561 pr_warn("Could not retrieve ONFI ECC requirements\n");
3562 }
3563
3564 return 1;
3565 }
3566
3567 /*
3568 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3569 */
3570 static int nand_flash_detect_jedec(struct nand_chip *chip)
3571 {
3572 struct mtd_info *mtd = nand_to_mtd(chip);
3573 struct nand_jedec_params *p = &chip->jedec_params;
3574 struct jedec_ecc_info *ecc;
3575 int val;
3576 int i, j;
3577
3578 /* Try JEDEC for unknown chip or LP */
3579 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3580 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3581 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3582 chip->read_byte(mtd) != 'C')
3583 return 0;
3584
3585 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3586 for (i = 0; i < 3; i++) {
3587 for (j = 0; j < sizeof(*p); j++)
3588 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3589
3590 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3591 le16_to_cpu(p->crc))
3592 break;
3593 }
3594
3595 if (i == 3) {
3596 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3597 return 0;
3598 }
3599
3600 /* Check version */
3601 val = le16_to_cpu(p->revision);
3602 if (val & (1 << 2))
3603 chip->jedec_version = 10;
3604 else if (val & (1 << 1))
3605 chip->jedec_version = 1; /* vendor specific version */
3606
3607 if (!chip->jedec_version) {
3608 pr_info("unsupported JEDEC version: %d\n", val);
3609 return 0;
3610 }
3611
3612 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3613 sanitize_string(p->model, sizeof(p->model));
3614 if (!mtd->name)
3615 mtd->name = p->model;
3616
3617 mtd->writesize = le32_to_cpu(p->byte_per_page);
3618
3619 	/* Please refer to the comment for nand_flash_detect_onfi(). */
3620 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3621 mtd->erasesize *= mtd->writesize;
3622
3623 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3624
3625 	/* Please refer to the comment for nand_flash_detect_onfi(). */
3626 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3627 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3628 chip->bits_per_cell = p->bits_per_cell;
3629
3630 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3631 chip->options |= NAND_BUSWIDTH_16;
3632
3633 /* ECC info */
3634 ecc = &p->ecc_info[0];
3635
3636 if (ecc->codeword_size >= 9) {
3637 chip->ecc_strength_ds = ecc->ecc_bits;
3638 chip->ecc_step_ds = 1 << ecc->codeword_size;
3639 } else {
3640 pr_warn("Invalid codeword size\n");
3641 }
3642
3643 return 1;
3644 }
3645
3646 /*
3647 * nand_id_has_period - Check if an ID string has a given wraparound period
3648 * @id_data: the ID string
3649 * @arrlen: the length of the @id_data array
3650 * @period: the period of repetition
3651 *
3652 * Check if an ID string is repeated within a given sequence of bytes at
3653 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
3654 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3655 * if the repetition has a period of @period; otherwise, returns zero.
3656 */
3657 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3658 {
3659 int i, j;
3660 for (i = 0; i < period; i++)
3661 for (j = i + period; j < arrlen; j += period)
3662 if (id_data[i] != id_data[j])
3663 return 0;
3664 return 1;
3665 }
3666
3667 /*
3668 * nand_id_len - Get the length of an ID string returned by CMD_READID
3669 * @id_data: the ID string
3670 * @arrlen: the length of the @id_data array
3671 *
3672 * Returns the length of the ID string, according to known wraparound/trailing
3673 * zero patterns. If no pattern exists, returns the length of the array.
3674 */
3675 static int nand_id_len(u8 *id_data, int arrlen)
3676 {
3677 int last_nonzero, period;
3678
3679 /* Find last non-zero byte */
3680 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3681 if (id_data[last_nonzero])
3682 break;
3683
3684 /* All zeros */
3685 if (last_nonzero < 0)
3686 return 0;
3687
3688 /* Calculate wraparound period */
3689 for (period = 1; period < arrlen; period++)
3690 if (nand_id_has_period(id_data, arrlen, period))
3691 break;
3692
3693 /* There's a repeated pattern */
3694 if (period < arrlen)
3695 return period;
3696
3697 /* There are trailing zeros */
3698 if (last_nonzero < arrlen - 1)
3699 return last_nonzero + 1;
3700
3701 /* No pattern detected */
3702 return arrlen;
3703 }
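
/*
 * Two worked examples with hypothetical ID strings:
 *
 *	{0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00}
 *		no repeating period, trailing zero at index 7 -> length 7
 *	{0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51}
 *		wraps around with period 5 -> length 5
 */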
3704
3705 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
3706 static int nand_get_bits_per_cell(u8 cellinfo)
3707 {
3708 int bits;
3709
3710 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3711 bits >>= NAND_CI_CELLTYPE_SHIFT;
3712 return bits + 1;
3713 }
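
/*
 * NAND_CI_CELLTYPE_MSK selects bits 3:2 of the third ID byte.  For
 * example, a third ID byte of 0x10 has the field set to 0b00 and yields
 * 1 bit per cell (SLC), while 0x14 has it set to 0b01 and yields 2 bits
 * per cell (MLC).
 */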
3714
3715 /*
3716 * Many newer NAND chips share similar device ID codes, which represent the size of the
3717 * chip. The rest of the parameters must be decoded according to generic or
3718 * manufacturer-specific "extended ID" decoding patterns.
3719 */
3720 void nand_decode_ext_id(struct nand_chip *chip)
3721 {
3722 struct mtd_info *mtd = nand_to_mtd(chip);
3723 int extid;
3724 u8 *id_data = chip->id.data;
3725 /* The 3rd id byte holds MLC / multichip data */
3726 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3727 /* The 4th id byte is the important one */
3728 extid = id_data[3];
3729
3730 /* Calc pagesize */
3731 mtd->writesize = 1024 << (extid & 0x03);
3732 extid >>= 2;
3733 /* Calc oobsize */
3734 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3735 extid >>= 2;
3736 	/* Calc blocksize. Blocksize is a multiple of 64KiB */
3737 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3738 extid >>= 2;
3739 /* Get buswidth information */
3740 if (extid & 0x1)
3741 chip->options |= NAND_BUSWIDTH_16;
3742 }
3743 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
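
/*
 * A worked example with a hypothetical 4th ID byte of 0x95 (binary
 * 1001 0101), decoded field by field as above:
 *
 *	pagesize:  1024 << (0x95 & 0x03)              = 2048 bytes
 *	oobsize:   (8 << (0x25 & 0x01)) * (2048 >> 9) = 64 bytes
 *	blocksize: (64 * 1024) << (0x09 & 0x03)       = 128 KiB
 *	buswidth:  (0x02 & 0x1) == 0                  -> 8 bit
 *
 * where 0x25, 0x09 and 0x02 are the running extid value after each
 * two-bit shift.
 */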
3744
3745 /*
3746 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3747 * decodes a matching ID table entry and assigns the MTD size parameters for
3748 * the chip.
3749 */
3750 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3751 {
3752 struct mtd_info *mtd = nand_to_mtd(chip);
3753
3754 mtd->erasesize = type->erasesize;
3755 mtd->writesize = type->pagesize;
3756 mtd->oobsize = mtd->writesize / 32;
3757
3758 /* All legacy ID NAND are small-page, SLC */
3759 chip->bits_per_cell = 1;
3760 }
3761
3762 /*
3763 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3764 * heuristic patterns using various detected parameters (e.g., manufacturer,
3765 * page size, cell-type information).
3766 */
3767 static void nand_decode_bbm_options(struct nand_chip *chip)
3768 {
3769 struct mtd_info *mtd = nand_to_mtd(chip);
3770
3771 /* Set the bad block position */
3772 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3773 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3774 else
3775 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3776 }
3777
3778 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3779 {
3780 return type->id_len;
3781 }
3782
3783 static bool find_full_id_nand(struct nand_chip *chip,
3784 struct nand_flash_dev *type)
3785 {
3786 struct mtd_info *mtd = nand_to_mtd(chip);
3787 u8 *id_data = chip->id.data;
3788
3789 if (!strncmp(type->id, id_data, type->id_len)) {
3790 mtd->writesize = type->pagesize;
3791 mtd->erasesize = type->erasesize;
3792 mtd->oobsize = type->oobsize;
3793
3794 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3795 chip->chipsize = (uint64_t)type->chipsize << 20;
3796 chip->options |= type->options;
3797 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3798 chip->ecc_step_ds = NAND_ECC_STEP(type);
3799 chip->onfi_timing_mode_default =
3800 type->onfi_timing_mode_default;
3801
3802 if (!mtd->name)
3803 mtd->name = type->name;
3804
3805 return true;
3806 }
3807 return false;
3808 }
3809
3810 /*
3811 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3812 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3813 * table.
3814 */
3815 static void nand_manufacturer_detect(struct nand_chip *chip)
3816 {
3817 /*
3818 * Try manufacturer detection if available and use
3819 * nand_decode_ext_id() otherwise.
3820 */
3821 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3822 chip->manufacturer.desc->ops->detect) {
3823 /* The 3rd id byte holds MLC / multichip data */
3824 chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
3825 chip->manufacturer.desc->ops->detect(chip);
3826 } else {
3827 nand_decode_ext_id(chip);
3828 }
3829 }
3830
3831 /*
3832 * Manufacturer initialization. This function is called for all NANDs including
3833 * ONFI and JEDEC compliant ones.
3834 * Manufacturer drivers should put all their specific initialization code in
3835 * their ->init() hook.
3836 */
3837 static int nand_manufacturer_init(struct nand_chip *chip)
3838 {
3839 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
3840 !chip->manufacturer.desc->ops->init)
3841 return 0;
3842
3843 return chip->manufacturer.desc->ops->init(chip);
3844 }
3845
3846 /*
3847 * Manufacturer cleanup. This function is called for all NANDs including
3848 * ONFI and JEDEC compliant ones.
3849 * Manufacturer drivers should put all their specific cleanup code in their
3850 * ->cleanup() hook.
3851 */
3852 static void nand_manufacturer_cleanup(struct nand_chip *chip)
3853 {
3854 /* Release manufacturer private data */
3855 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3856 chip->manufacturer.desc->ops->cleanup)
3857 chip->manufacturer.desc->ops->cleanup(chip);
3858 }
3859
3860 /*
3861 * Get the flash and manufacturer id and lookup if the type is supported.
3862 */
3863 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
3864 {
3865 const struct nand_manufacturer *manufacturer;
3866 struct mtd_info *mtd = nand_to_mtd(chip);
3867 int busw;
3868 int i;
3869 u8 *id_data = chip->id.data;
3870 u8 maf_id, dev_id;
3871
3872 /*
3873 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3874 * after power-up.
3875 */
3876 nand_reset(chip, 0);
3877
3878 /* Select the device */
3879 chip->select_chip(mtd, 0);
3880
3881 /* Send the command for reading device ID */
3882 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3883
3884 /* Read manufacturer and device IDs */
3885 maf_id = chip->read_byte(mtd);
3886 dev_id = chip->read_byte(mtd);
3887
3888 /*
3889 * Try again to make sure, as on some systems bus-hold or other
3890 * interface concerns can cause random data that looks like a
3891 * possibly credible NAND flash to appear. If the two results do
3892 * not match, ignore the device completely.
3893 */
3894
3895 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3896
3897 /* Read entire ID string */
3898 for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
3899 id_data[i] = chip->read_byte(mtd);
3900
3901 if (id_data[0] != maf_id || id_data[1] != dev_id) {
3902 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
3903 maf_id, dev_id, id_data[0], id_data[1]);
3904 return -ENODEV;
3905 }
3906
3907 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
3908
3909 /* Try to identify manufacturer */
3910 manufacturer = nand_get_manufacturer(maf_id);
3911 chip->manufacturer.desc = manufacturer;
3912
3913 if (!type)
3914 type = nand_flash_ids;
3915
3916 /*
3917 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
3918 * override it.
3919 * This is required to make sure initial NAND bus width set by the
3920 * NAND controller driver is coherent with the real NAND bus width
3921 * (extracted by auto-detection code).
3922 */
3923 busw = chip->options & NAND_BUSWIDTH_16;
3924
3925 /*
3926 * The flag is only set (never cleared), reset it to its default value
3927 * before starting auto-detection.
3928 */
3929 chip->options &= ~NAND_BUSWIDTH_16;
3930
3931 for (; type->name != NULL; type++) {
3932 if (is_full_id_nand(type)) {
3933 if (find_full_id_nand(chip, type))
3934 goto ident_done;
3935 } else if (dev_id == type->dev_id) {
3936 break;
3937 }
3938 }
3939
3940 chip->onfi_version = 0;
3941 if (!type->name || !type->pagesize) {
3942 /* Check if the chip is ONFI compliant */
3943 if (nand_flash_detect_onfi(chip))
3944 goto ident_done;
3945
3946 /* Check if the chip is JEDEC compliant */
3947 if (nand_flash_detect_jedec(chip))
3948 goto ident_done;
3949 }
3950
3951 if (!type->name)
3952 return -ENODEV;
3953
3954 if (!mtd->name)
3955 mtd->name = type->name;
3956
3957 chip->chipsize = (uint64_t)type->chipsize << 20;
3958
3959 if (!type->pagesize)
3960 nand_manufacturer_detect(chip);
3961 else
3962 nand_decode_id(chip, type);
3963
3964 /* Get chip options */
3965 chip->options |= type->options;
3966
3967 ident_done:
3968
3969 if (chip->options & NAND_BUSWIDTH_AUTO) {
3970 WARN_ON(busw & NAND_BUSWIDTH_16);
3971 nand_set_defaults(chip);
3972 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
3973 /*
3974 * Check if the bus width is correct. Hardware drivers should
3975 * set up the chip correctly!
3976 */
3977 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
3978 maf_id, dev_id);
3979 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
3980 mtd->name);
3981 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
3982 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
3983 return -EINVAL;
3984 }
3985
3986 nand_decode_bbm_options(chip);
3987
3988 /* Calculate the address shift from the page size */
3989 chip->page_shift = ffs(mtd->writesize) - 1;
3990 /* Convert chipsize to number of pages per chip -1 */
3991 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
3992
3993 chip->bbt_erase_shift = chip->phys_erase_shift =
3994 ffs(mtd->erasesize) - 1;
3995 if (chip->chipsize & 0xffffffff)
3996 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
3997 else {
3998 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
3999 chip->chip_shift += 32 - 1;
4000 }
4001
4002 chip->badblockbits = 8;
4003 chip->erase = single_erase;
4004
4005 /* Do not replace user supplied command function! */
4006 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4007 chip->cmdfunc = nand_command_lp;
4008
4009 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4010 maf_id, dev_id);
4011
4012 if (chip->onfi_version)
4013 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4014 chip->onfi_params.model);
4015 else if (chip->jedec_version)
4016 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4017 chip->jedec_params.model);
4018 else
4019 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4020 type->name);
4021
4022 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4023 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4024 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4025 return 0;
4026 }
4027
4028 static const char * const nand_ecc_modes[] = {
4029 [NAND_ECC_NONE] = "none",
4030 [NAND_ECC_SOFT] = "soft",
4031 [NAND_ECC_HW] = "hw",
4032 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4033 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4034 [NAND_ECC_ON_DIE] = "on-die",
4035 };
4036
4037 static int of_get_nand_ecc_mode(struct device_node *np)
4038 {
4039 const char *pm;
4040 int err, i;
4041
4042 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4043 if (err < 0)
4044 return err;
4045
4046 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4047 if (!strcasecmp(pm, nand_ecc_modes[i]))
4048 return i;
4049
4050 /*
4051 * For backward compatibility we support a few obsolete values that no
4052 * longer have a mapping in nand_ecc_modes_t (they were merged into
4053 * other enums).
4054 */
4055 if (!strcasecmp(pm, "soft_bch"))
4056 return NAND_ECC_SOFT;
4057
4058 return -ENODEV;
4059 }
4060
4061 static const char * const nand_ecc_algos[] = {
4062 [NAND_ECC_HAMMING] = "hamming",
4063 [NAND_ECC_BCH] = "bch",
4064 };
4065
4066 static int of_get_nand_ecc_algo(struct device_node *np)
4067 {
4068 const char *pm;
4069 int err, i;
4070
4071 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4072 if (!err) {
4073 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4074 if (!strcasecmp(pm, nand_ecc_algos[i]))
4075 return i;
4076 return -ENODEV;
4077 }
4078
4079 /*
4080 * For backward compatibility we also read "nand-ecc-mode", checking
4081 * for some obsolete values that used to specify the ECC algorithm.
4082 */
4083 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4084 if (err < 0)
4085 return err;
4086
4087 if (!strcasecmp(pm, "soft"))
4088 return NAND_ECC_HAMMING;
4089 else if (!strcasecmp(pm, "soft_bch"))
4090 return NAND_ECC_BCH;
4091
4092 return -ENODEV;
4093 }
4094
4095 static int of_get_nand_ecc_step_size(struct device_node *np)
4096 {
4097 int ret;
4098 u32 val;
4099
4100 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4101 return ret ? ret : val;
4102 }
4103
4104 static int of_get_nand_ecc_strength(struct device_node *np)
4105 {
4106 int ret;
4107 u32 val;
4108
4109 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4110 return ret ? ret : val;
4111 }
4112
4113 static int of_get_nand_bus_width(struct device_node *np)
4114 {
4115 u32 val;
4116
4117 if (of_property_read_u32(np, "nand-bus-width", &val))
4118 return 8;
4119
4120 switch (val) {
4121 case 8:
4122 case 16:
4123 return val;
4124 default:
4125 return -EIO;
4126 }
4127 }
4128
4129 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4130 {
4131 return of_property_read_bool(np, "nand-on-flash-bbt");
4132 }
4133
4134 static int nand_dt_init(struct nand_chip *chip)
4135 {
4136 struct device_node *dn = nand_get_flash_node(chip);
4137 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4138
4139 if (!dn)
4140 return 0;
4141
4142 if (of_get_nand_bus_width(dn) == 16)
4143 chip->options |= NAND_BUSWIDTH_16;
4144
4145 if (of_get_nand_on_flash_bbt(dn))
4146 chip->bbt_options |= NAND_BBT_USE_FLASH;
4147
4148 ecc_mode = of_get_nand_ecc_mode(dn);
4149 ecc_algo = of_get_nand_ecc_algo(dn);
4150 ecc_strength = of_get_nand_ecc_strength(dn);
4151 ecc_step = of_get_nand_ecc_step_size(dn);
4152
4153 if (ecc_mode >= 0)
4154 chip->ecc.mode = ecc_mode;
4155
4156 if (ecc_algo >= 0)
4157 chip->ecc.algo = ecc_algo;
4158
4159 if (ecc_strength >= 0)
4160 chip->ecc.strength = ecc_strength;
4161
4162 if (ecc_step > 0)
4163 chip->ecc.size = ecc_step;
4164
4165 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4166 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4167
4168 return 0;
4169 }
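
/*
 * Example of the generic NAND device tree properties parsed above (a
 * minimal, hypothetical chip node; the values are only illustrative):
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *	};
 */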
4170
4171 /**
4172 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4173 * @mtd: MTD device structure
4174 * @maxchips: number of chips to scan for
4175 * @table: alternative NAND ID table
4176 *
4177 * This is the first phase of the normal nand_scan() function. It reads the
4178 * flash ID and sets up MTD fields accordingly.
4179 *
4180 */
4181 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4182 struct nand_flash_dev *table)
4183 {
4184 int i, nand_maf_id, nand_dev_id;
4185 struct nand_chip *chip = mtd_to_nand(mtd);
4186 int ret;
4187
4188 ret = nand_dt_init(chip);
4189 if (ret)
4190 return ret;
4191
4192 if (!mtd->name && mtd->dev.parent)
4193 mtd->name = dev_name(mtd->dev.parent);
4194
4195 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4196 /*
4197 * The default implementations assigned for select_chip() and
4198 * cmdfunc() both expect cmd_ctrl() to be populated, so we
4199 * need to check that this is the case.
4200 */
4201 pr_err("chip.cmd_ctrl() callback is not provided\n");
4202 return -EINVAL;
4203 }
4204 /* Set the default functions */
4205 nand_set_defaults(chip);
4206
4207 /* Read the flash type */
4208 ret = nand_detect(chip, table);
4209 if (ret) {
4210 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4211 pr_warn("No NAND device found\n");
4212 chip->select_chip(mtd, -1);
4213 return ret;
4214 }
4215
4216 nand_maf_id = chip->id.data[0];
4217 nand_dev_id = chip->id.data[1];
4218
4219 chip->select_chip(mtd, -1);
4220
4221 /* Check for a chip array */
4222 for (i = 1; i < maxchips; i++) {
4223 /* See comment in nand_get_flash_type for reset */
4224 nand_reset(chip, i);
4225
4226 chip->select_chip(mtd, i);
4227 /* Send the command for reading device ID */
4228 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4229 /* Read manufacturer and device IDs */
4230 if (nand_maf_id != chip->read_byte(mtd) ||
4231 nand_dev_id != chip->read_byte(mtd)) {
4232 chip->select_chip(mtd, -1);
4233 break;
4234 }
4235 chip->select_chip(mtd, -1);
4236 }
4237 if (i > 1)
4238 pr_info("%d chips detected\n", i);
4239
4240 /* Store the number of chips and calc total size for mtd */
4241 chip->numchips = i;
4242 mtd->size = i * chip->chipsize;
4243
4244 return 0;
4245 }
4246 EXPORT_SYMBOL(nand_scan_ident);
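
/*
 * Sketch of the usual two-phase scan in a controller driver (hypothetical
 * driver code, hypothetical ECC values): nand_scan_ident() is called first
 * so the ECC configuration can be tuned to the detected chip before
 * nand_scan_tail() finalizes it.
 *
 *	ret = nand_scan_ident(mtd, 1, NULL);
 *	if (ret)
 *		return ret;
 *
 *	chip->ecc.mode = NAND_ECC_HW;
 *	chip->ecc.size = 512;
 *	chip->ecc.strength = 8;
 *	(driver-specific ecc.hwctl/calculate/correct callbacks are set here)
 *
 *	ret = nand_scan_tail(mtd);
 *	if (ret)
 *		return ret;
 *
 *	return mtd_device_register(mtd, NULL, 0);
 */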
4247
4248 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4249 {
4250 struct nand_chip *chip = mtd_to_nand(mtd);
4251 struct nand_ecc_ctrl *ecc = &chip->ecc;
4252
4253 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4254 return -EINVAL;
4255
4256 switch (ecc->algo) {
4257 case NAND_ECC_HAMMING:
4258 ecc->calculate = nand_calculate_ecc;
4259 ecc->correct = nand_correct_data;
4260 ecc->read_page = nand_read_page_swecc;
4261 ecc->read_subpage = nand_read_subpage;
4262 ecc->write_page = nand_write_page_swecc;
4263 ecc->read_page_raw = nand_read_page_raw;
4264 ecc->write_page_raw = nand_write_page_raw;
4265 ecc->read_oob = nand_read_oob_std;
4266 ecc->write_oob = nand_write_oob_std;
4267 if (!ecc->size)
4268 ecc->size = 256;
4269 ecc->bytes = 3;
4270 ecc->strength = 1;
4271 return 0;
4272 case NAND_ECC_BCH:
4273 if (!mtd_nand_has_bch()) {
4274 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4275 return -EINVAL;
4276 }
4277 ecc->calculate = nand_bch_calculate_ecc;
4278 ecc->correct = nand_bch_correct_data;
4279 ecc->read_page = nand_read_page_swecc;
4280 ecc->read_subpage = nand_read_subpage;
4281 ecc->write_page = nand_write_page_swecc;
4282 ecc->read_page_raw = nand_read_page_raw;
4283 ecc->write_page_raw = nand_write_page_raw;
4284 ecc->read_oob = nand_read_oob_std;
4285 ecc->write_oob = nand_write_oob_std;
4286
4287 /*
4288 * Board driver should supply ecc.size and ecc.strength
4289 * values to select how many bits are correctable.
4290 * Otherwise, default to 4 bits for large page devices.
4291 */
4292 if (!ecc->size && (mtd->oobsize >= 64)) {
4293 ecc->size = 512;
4294 ecc->strength = 4;
4295 }
4296
4297 /*
4298 * If no ECC placement scheme was provided, pick up the default
4299 * large page one.
4300 */
4301 if (!mtd->ooblayout) {
4302 /* handle large page devices only */
4303 if (mtd->oobsize < 64) {
4304 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4305 return -EINVAL;
4306 }
4307
4308 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4309
4310 }
4311
4312 /*
4313 * We can only maximize ECC config when the default layout is
4314 * used, otherwise we don't know how many bytes can really be
4315 * used.
4316 */
4317 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4318 ecc->options & NAND_ECC_MAXIMIZE) {
4319 int steps, bytes;
4320
4321 /* Always prefer 1k ECC steps over 512-byte ones */
4322 ecc->size = 1024;
4323 steps = mtd->writesize / ecc->size;
4324
4325 /* Reserve 2 bytes for the BBM */
4326 bytes = (mtd->oobsize - 2) / steps;
4327 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4328 }
4329
4330 /* See nand_bch_init() for details. */
4331 ecc->bytes = 0;
4332 ecc->priv = nand_bch_init(mtd);
4333 if (!ecc->priv) {
4334 WARN(1, "BCH ECC initialization failed!\n");
4335 return -EINVAL;
4336 }
4337 return 0;
4338 default:
4339 WARN(1, "Unsupported ECC algorithm!\n");
4340 return -EINVAL;
4341 }
4342 }
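
/*
 * Worked example of the NAND_ECC_MAXIMIZE computation above, assuming a
 * 2KiB page with 64 bytes of OOB: steps = 2048 / 1024 = 2, bytes =
 * (64 - 2) / 2 = 31 per step, and with fls(8 * 1024) = 14 the resulting
 * strength is 31 * 8 / 14 = 17 correctable bits per 1KiB step;
 * nand_bch_init() then derives the actual number of ECC bytes from that.
 */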
4343
4344 /**
4345 * nand_check_ecc_caps - check the sanity of preset ECC settings
4346 * @chip: nand chip info structure
4347 * @caps: ECC caps info structure
4348 * @oobavail: OOB size that the ECC engine can use
4349 *
4350 * When ECC step size and strength are already set, check if they are supported
4351 * by the controller and the calculated ECC bytes fit within the chip's OOB.
4352 * On success, the calculated ECC bytes is set.
4353 */
4354 int nand_check_ecc_caps(struct nand_chip *chip,
4355 const struct nand_ecc_caps *caps, int oobavail)
4356 {
4357 struct mtd_info *mtd = nand_to_mtd(chip);
4358 const struct nand_ecc_step_info *stepinfo;
4359 int preset_step = chip->ecc.size;
4360 int preset_strength = chip->ecc.strength;
4361 int nsteps, ecc_bytes;
4362 int i, j;
4363
4364 if (WARN_ON(oobavail < 0))
4365 return -EINVAL;
4366
4367 if (!preset_step || !preset_strength)
4368 return -ENODATA;
4369
4370 nsteps = mtd->writesize / preset_step;
4371
4372 for (i = 0; i < caps->nstepinfos; i++) {
4373 stepinfo = &caps->stepinfos[i];
4374
4375 if (stepinfo->stepsize != preset_step)
4376 continue;
4377
4378 for (j = 0; j < stepinfo->nstrengths; j++) {
4379 if (stepinfo->strengths[j] != preset_strength)
4380 continue;
4381
4382 ecc_bytes = caps->calc_ecc_bytes(preset_step,
4383 preset_strength);
4384 if (WARN_ON_ONCE(ecc_bytes < 0))
4385 return ecc_bytes;
4386
4387 if (ecc_bytes * nsteps > oobavail) {
4388 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
4389 preset_step, preset_strength);
4390 return -ENOSPC;
4391 }
4392
4393 chip->ecc.bytes = ecc_bytes;
4394
4395 return 0;
4396 }
4397 }
4398
4399 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
4400 preset_step, preset_strength);
4401
4402 return -ENOTSUPP;
4403 }
4404 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
4405
4406 /**
4407 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
4408 * @chip: nand chip info structure
4409 * @caps: ECC engine caps info structure
4410 * @oobavail: OOB size that the ECC engine can use
4411 *
4412 * If a chip's ECC requirement is provided, try to meet it with the least
4413 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
4414 * On success, the chosen ECC settings are set.
4415 */
4416 int nand_match_ecc_req(struct nand_chip *chip,
4417 const struct nand_ecc_caps *caps, int oobavail)
4418 {
4419 struct mtd_info *mtd = nand_to_mtd(chip);
4420 const struct nand_ecc_step_info *stepinfo;
4421 int req_step = chip->ecc_step_ds;
4422 int req_strength = chip->ecc_strength_ds;
4423 int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
4424 int best_step, best_strength, best_ecc_bytes;
4425 int best_ecc_bytes_total = INT_MAX;
4426 int i, j;
4427
4428 if (WARN_ON(oobavail < 0))
4429 return -EINVAL;
4430
4431 /* No information provided by the NAND chip */
4432 if (!req_step || !req_strength)
4433 return -ENOTSUPP;
4434
4435 /* number of correctable bits the chip requires in a page */
4436 req_corr = mtd->writesize / req_step * req_strength;
4437
4438 for (i = 0; i < caps->nstepinfos; i++) {
4439 stepinfo = &caps->stepinfos[i];
4440 step_size = stepinfo->stepsize;
4441
4442 for (j = 0; j < stepinfo->nstrengths; j++) {
4443 strength = stepinfo->strengths[j];
4444
4445 /*
4446 * If both step size and strength are smaller than the
4447 * chip's requirement, it is not easy to compare the
4448 * resulting reliability.
4449 */
4450 if (step_size < req_step && strength < req_strength)
4451 continue;
4452
4453 if (mtd->writesize % step_size)
4454 continue;
4455
4456 nsteps = mtd->writesize / step_size;
4457
4458 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4459 if (WARN_ON_ONCE(ecc_bytes < 0))
4460 continue;
4461 ecc_bytes_total = ecc_bytes * nsteps;
4462
4463 if (ecc_bytes_total > oobavail ||
4464 strength * nsteps < req_corr)
4465 continue;
4466
4467 /*
4468 * We assume the best is to meet the chip's requirement
4469 * with the least number of ECC bytes.
4470 */
4471 if (ecc_bytes_total < best_ecc_bytes_total) {
4472 best_ecc_bytes_total = ecc_bytes_total;
4473 best_step = step_size;
4474 best_strength = strength;
4475 best_ecc_bytes = ecc_bytes;
4476 }
4477 }
4478 }
4479
4480 if (best_ecc_bytes_total == INT_MAX)
4481 return -ENOTSUPP;
4482
4483 chip->ecc.size = best_step;
4484 chip->ecc.strength = best_strength;
4485 chip->ecc.bytes = best_ecc_bytes;
4486
4487 return 0;
4488 }
4489 EXPORT_SYMBOL_GPL(nand_match_ecc_req);
4490
4491 /**
4492 * nand_maximize_ecc - choose the max ECC strength available
4493 * @chip: nand chip info structure
4494 * @caps: ECC engine caps info structure
4495 * @oobavail: OOB size that the ECC engine can use
4496 *
4497 * Choose the max ECC strength that is supported on the controller, and can fit
4498 * within the chip's OOB. On success, the chosen ECC settings are set.
4499 */
4500 int nand_maximize_ecc(struct nand_chip *chip,
4501 const struct nand_ecc_caps *caps, int oobavail)
4502 {
4503 struct mtd_info *mtd = nand_to_mtd(chip);
4504 const struct nand_ecc_step_info *stepinfo;
4505 int step_size, strength, nsteps, ecc_bytes, corr;
4506 int best_corr = 0;
4507 int best_step = 0;
4508 int best_strength, best_ecc_bytes;
4509 int i, j;
4510
4511 if (WARN_ON(oobavail < 0))
4512 return -EINVAL;
4513
4514 for (i = 0; i < caps->nstepinfos; i++) {
4515 stepinfo = &caps->stepinfos[i];
4516 step_size = stepinfo->stepsize;
4517
4518 /* If chip->ecc.size is already set, respect it */
4519 if (chip->ecc.size && step_size != chip->ecc.size)
4520 continue;
4521
4522 for (j = 0; j < stepinfo->nstrengths; j++) {
4523 strength = stepinfo->strengths[j];
4524
4525 if (mtd->writesize % step_size)
4526 continue;
4527
4528 nsteps = mtd->writesize / step_size;
4529
4530 ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4531 if (WARN_ON_ONCE(ecc_bytes < 0))
4532 continue;
4533
4534 if (ecc_bytes * nsteps > oobavail)
4535 continue;
4536
4537 corr = strength * nsteps;
4538
4539 /*
4540 * If the number of correctable bits is the same,
4541 * bigger step_size has more reliability.
4542 */
4543 if (corr > best_corr ||
4544 (corr == best_corr && step_size > best_step)) {
4545 best_corr = corr;
4546 best_step = step_size;
4547 best_strength = strength;
4548 best_ecc_bytes = ecc_bytes;
4549 }
4550 }
4551 }
4552
4553 if (!best_corr)
4554 return -ENOTSUPP;
4555
4556 chip->ecc.size = best_step;
4557 chip->ecc.strength = best_strength;
4558 chip->ecc.bytes = best_ecc_bytes;
4559
4560 return 0;
4561 }
4562 EXPORT_SYMBOL_GPL(nand_maximize_ecc);
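
/*
 * Sketch of how a controller driver might describe its ECC engine and use
 * the three helpers above (the step size, strengths and calc_ecc_bytes()
 * callback are hypothetical):
 *
 *	static const int my_strengths[] = { 4, 8, 16 };
 *
 *	static const struct nand_ecc_step_info my_stepinfo = {
 *		.stepsize = 512,
 *		.strengths = my_strengths,
 *		.nstrengths = ARRAY_SIZE(my_strengths),
 *	};
 *
 *	static const struct nand_ecc_caps my_caps = {
 *		.stepinfos = &my_stepinfo,
 *		.nstepinfos = 1,
 *		.calc_ecc_bytes = my_calc_ecc_bytes,
 *	};
 *
 *	if (chip->ecc.size && chip->ecc.strength)
 *		ret = nand_check_ecc_caps(chip, &my_caps, oobavail);
 *	else if (chip->ecc.options & NAND_ECC_MAXIMIZE)
 *		ret = nand_maximize_ecc(chip, &my_caps, oobavail);
 *	else
 *		ret = nand_match_ecc_req(chip, &my_caps, oobavail);
 */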
4563
4564 /*
4565 * Check if the chip configuration meets the datasheet requirements.
4566 *
4567 * If our configuration corrects A bits per B bytes and the minimum
4568 * required correction level is X bits per Y bytes, then we must ensure
4569 * both of the following are true:
4570 *
4571 * (1) A / B >= X / Y
4572 * (2) A >= X
4573 *
4574 * Requirement (1) ensures we can correct for the required bitflip density.
4575 * Requirement (2) ensures we can correct even when all bitflips are clumped
4576 * in the same sector.
4577 */
4578 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4579 {
4580 struct nand_chip *chip = mtd_to_nand(mtd);
4581 struct nand_ecc_ctrl *ecc = &chip->ecc;
4582 int corr, ds_corr;
4583
4584 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4585 /* Not enough information */
4586 return true;
4587
4588 /*
4589 * We get the number of corrected bits per page to compare
4590 * the correction density.
4591 */
4592 corr = (mtd->writesize * ecc->strength) / ecc->size;
4593 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4594
4595 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4596 }
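
/*
 * Example of the check above: a chip requiring 4 bits per 512 bytes
 * (ecc_strength_ds = 4, ecc_step_ds = 512) driven with 8 bits per 1024
 * bytes gives, on a 2KiB page, corr = 2048 * 8 / 1024 = 16 and
 * ds_corr = 2048 * 4 / 512 = 16; since 16 >= 16 and 8 >= 4, the
 * configuration is considered strong enough.
 */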
4597
4598 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4599 {
4600 struct nand_ecc_ctrl *ecc = &chip->ecc;
4601
4602 if (nand_standard_page_accessors(ecc))
4603 return false;
4604
4605 /*
4606 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4607 * controller driver implements all the page accessors because
4608 * default helpers are not suitable when the core does not
4609 * send the READ0/PAGEPROG commands.
4610 */
4611 return (!ecc->read_page || !ecc->write_page ||
4612 !ecc->read_page_raw || !ecc->write_page_raw ||
4613 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4614 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4615 ecc->hwctl && ecc->calculate));
4616 }
4617
4618 /**
4619 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4620 * @mtd: MTD device structure
4621 *
4622 * This is the second phase of the normal nand_scan() function. It fills out
4623 * all the uninitialized function pointers with the defaults and scans for a
4624 * bad block table if appropriate.
4625 */
4626 int nand_scan_tail(struct mtd_info *mtd)
4627 {
4628 struct nand_chip *chip = mtd_to_nand(mtd);
4629 struct nand_ecc_ctrl *ecc = &chip->ecc;
4630 struct nand_buffers *nbuf = NULL;
4631 int ret, i;
4632
4633 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4634 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4635 !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
4636 return -EINVAL;
4637 }
4638
4639 if (invalid_ecc_page_accessors(chip)) {
4640 pr_err("Invalid ECC page accessors setup\n");
4641 return -EINVAL;
4642 }
4643
4644 if (!(chip->options & NAND_OWN_BUFFERS)) {
4645 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
4646 if (!nbuf)
4647 return -ENOMEM;
4648
4649 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
4650 if (!nbuf->ecccalc) {
4651 ret = -ENOMEM;
4652 goto err_free_nbuf;
4653 }
4654
4655 nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
4656 if (!nbuf->ecccode) {
4657 ret = -ENOMEM;
4658 goto err_free_nbuf;
4659 }
4660
4661 nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
4662 GFP_KERNEL);
4663 if (!nbuf->databuf) {
4664 ret = -ENOMEM;
4665 goto err_free_nbuf;
4666 }
4667
4668 chip->buffers = nbuf;
4669 } else if (!chip->buffers) {
4670 return -ENOMEM;
4671 }
4672
4673 /*
4674 * FIXME: some NAND manufacturer drivers expect the first die to be
4675 * selected when manufacturer->init() is called. They should be fixed
4676 * to explicitly select the relevant die when interacting with the NAND
4677 * chip.
4678 */
4679 chip->select_chip(mtd, 0);
4680 ret = nand_manufacturer_init(chip);
4681 chip->select_chip(mtd, -1);
4682 if (ret)
4683 goto err_free_nbuf;
4684
4685 /* Set the internal oob buffer location, just after the page data */
4686 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4687
4688 /*
4689 * If no default placement scheme is given, select an appropriate one.
4690 */
4691 if (!mtd->ooblayout &&
4692 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4693 switch (mtd->oobsize) {
4694 case 8:
4695 case 16:
4696 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4697 break;
4698 case 64:
4699 case 128:
4700 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
4701 break;
4702 default:
4703 WARN(1, "No oob scheme defined for oobsize %d\n",
4704 mtd->oobsize);
4705 ret = -EINVAL;
4706 goto err_nand_manuf_cleanup;
4707 }
4708 }
4709
4710 /*
4711 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
4712 * the page size is only 256 bytes, fall back to software ECC.
4713 */
4714
4715 switch (ecc->mode) {
4716 case NAND_ECC_HW_OOB_FIRST:
4717 /* Similar to NAND_ECC_HW, but a separate read_page handle */
4718 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4719 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4720 ret = -EINVAL;
4721 goto err_nand_manuf_cleanup;
4722 }
4723 if (!ecc->read_page)
4724 ecc->read_page = nand_read_page_hwecc_oob_first;
4725
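/* fall through - the NAND_ECC_HW case below fills in the remaining default accessors */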
4726 case NAND_ECC_HW:
4727 /* Use standard hwecc read page function? */
4728 if (!ecc->read_page)
4729 ecc->read_page = nand_read_page_hwecc;
4730 if (!ecc->write_page)
4731 ecc->write_page = nand_write_page_hwecc;
4732 if (!ecc->read_page_raw)
4733 ecc->read_page_raw = nand_read_page_raw;
4734 if (!ecc->write_page_raw)
4735 ecc->write_page_raw = nand_write_page_raw;
4736 if (!ecc->read_oob)
4737 ecc->read_oob = nand_read_oob_std;
4738 if (!ecc->write_oob)
4739 ecc->write_oob = nand_write_oob_std;
4740 if (!ecc->read_subpage)
4741 ecc->read_subpage = nand_read_subpage;
4742 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4743 ecc->write_subpage = nand_write_subpage_hwecc;
4744
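/* fall through - the NAND_ECC_HW_SYNDROME case performs the common hardware ECC sanity checks */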
4745 case NAND_ECC_HW_SYNDROME:
4746 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4747 (!ecc->read_page ||
4748 ecc->read_page == nand_read_page_hwecc ||
4749 !ecc->write_page ||
4750 ecc->write_page == nand_write_page_hwecc)) {
4751 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4752 ret = -EINVAL;
4753 goto err_nand_manuf_cleanup;
4754 }
4755 /* Use standard syndrome read/write page function? */
4756 if (!ecc->read_page)
4757 ecc->read_page = nand_read_page_syndrome;
4758 if (!ecc->write_page)
4759 ecc->write_page = nand_write_page_syndrome;
4760 if (!ecc->read_page_raw)
4761 ecc->read_page_raw = nand_read_page_raw_syndrome;
4762 if (!ecc->write_page_raw)
4763 ecc->write_page_raw = nand_write_page_raw_syndrome;
4764 if (!ecc->read_oob)
4765 ecc->read_oob = nand_read_oob_syndrome;
4766 if (!ecc->write_oob)
4767 ecc->write_oob = nand_write_oob_syndrome;
4768
4769 if (mtd->writesize >= ecc->size) {
4770 if (!ecc->strength) {
4771 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4772 ret = -EINVAL;
4773 goto err_nand_manuf_cleanup;
4774 }
4775 break;
4776 }
4777 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4778 ecc->size, mtd->writesize);
4779 ecc->mode = NAND_ECC_SOFT;
4780 ecc->algo = NAND_ECC_HAMMING;
4781
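/* fall through - set up the software ECC handlers that were just selected */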
4782 case NAND_ECC_SOFT:
4783 ret = nand_set_ecc_soft_ops(mtd);
4784 if (ret) {
4785 ret = -EINVAL;
4786 goto err_nand_manuf_cleanup;
4787 }
4788 break;
4789
4790 case NAND_ECC_ON_DIE:
4791 if (!ecc->read_page || !ecc->write_page) {
4792 WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
4793 ret = -EINVAL;
4794 goto err_nand_manuf_cleanup;
4795 }
4796 if (!ecc->read_oob)
4797 ecc->read_oob = nand_read_oob_std;
4798 if (!ecc->write_oob)
4799 ecc->write_oob = nand_write_oob_std;
4800 break;
4801
4802 case NAND_ECC_NONE:
4803 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4804 ecc->read_page = nand_read_page_raw;
4805 ecc->write_page = nand_write_page_raw;
4806 ecc->read_oob = nand_read_oob_std;
4807 ecc->read_page_raw = nand_read_page_raw;
4808 ecc->write_page_raw = nand_write_page_raw;
4809 ecc->write_oob = nand_write_oob_std;
4810 ecc->size = mtd->writesize;
4811 ecc->bytes = 0;
4812 ecc->strength = 0;
4813 break;
4814
4815 default:
4816 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4817 ret = -EINVAL;
4818 goto err_nand_manuf_cleanup;
4819 }
4820
4821 /* For many systems, the standard OOB write also works for raw */
4822 if (!ecc->read_oob_raw)
4823 ecc->read_oob_raw = ecc->read_oob;
4824 if (!ecc->write_oob_raw)
4825 ecc->write_oob_raw = ecc->write_oob;
4826
4827 /* propagate ecc info to mtd_info */
4828 mtd->ecc_strength = ecc->strength;
4829 mtd->ecc_step_size = ecc->size;
4830
4831 /*
4832 * Set the number of read / write steps for one page depending on ECC
4833 * mode.
4834 */
4835 ecc->steps = mtd->writesize / ecc->size;
4836 if (ecc->steps * ecc->size != mtd->writesize) {
4837 WARN(1, "Invalid ECC parameters\n");
4838 ret = -EINVAL;
4839 goto err_nand_manuf_cleanup;
4840 }
4841 ecc->total = ecc->steps * ecc->bytes;
4842 if (ecc->total > mtd->oobsize) {
4843 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
4844 ret = -EINVAL;
4845 goto err_nand_manuf_cleanup;
4846 }
4847
4848 /*
4849 * The number of bytes available for a client to place data into
4850 * the out of band area.
4851 */
4852 ret = mtd_ooblayout_count_freebytes(mtd);
4853 if (ret < 0)
4854 ret = 0;
4855
4856 mtd->oobavail = ret;
4857
4858 /* ECC sanity check: warn if it's too weak */
4859 if (!nand_ecc_strength_good(mtd))
4860 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4861 mtd->name);
4862
4863 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4864 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4865 switch (ecc->steps) {
4866 case 2:
4867 mtd->subpage_sft = 1;
4868 break;
4869 case 4:
4870 case 8:
4871 case 16:
4872 mtd->subpage_sft = 2;
4873 break;
4874 }
4875 }
4876 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4877
4878 /* Initialize state */
4879 chip->state = FL_READY;
4880
4881 /* Invalidate the pagebuffer reference */
4882 chip->pagebuf = -1;
4883
4884 /* Large page NAND with SOFT_ECC should support subpage reads */
4885 switch (ecc->mode) {
4886 case NAND_ECC_SOFT:
4887 if (chip->page_shift > 9)
4888 chip->options |= NAND_SUBPAGE_READ;
4889 break;
4890
4891 default:
4892 break;
4893 }
4894
4895 /* Fill in remaining MTD driver data */
4896 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4897 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4898 MTD_CAP_NANDFLASH;
4899 mtd->_erase = nand_erase;
4900 mtd->_point = NULL;
4901 mtd->_unpoint = NULL;
4902 mtd->_read = nand_read;
4903 mtd->_write = nand_write;
4904 mtd->_panic_write = panic_nand_write;
4905 mtd->_read_oob = nand_read_oob;
4906 mtd->_write_oob = nand_write_oob;
4907 mtd->_sync = nand_sync;
4908 mtd->_lock = NULL;
4909 mtd->_unlock = NULL;
4910 mtd->_suspend = nand_suspend;
4911 mtd->_resume = nand_resume;
4912 mtd->_reboot = nand_shutdown;
4913 mtd->_block_isreserved = nand_block_isreserved;
4914 mtd->_block_isbad = nand_block_isbad;
4915 mtd->_block_markbad = nand_block_markbad;
4916 mtd->_max_bad_blocks = nand_max_bad_blocks;
4917 mtd->writebufsize = mtd->writesize;
4918
4919 /*
4920 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4921 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4922 * properly set.
4923 */
4924 if (!mtd->bitflip_threshold)
4925 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
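/* e.g. an ECC strength of 8 gives a default threshold of DIV_ROUND_UP(8 * 3, 4) = 6 bitflips */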
4926
4927 /* Initialize the ->data_interface field. */
4928 ret = nand_init_data_interface(chip);
4929 if (ret)
4930 goto err_nand_manuf_cleanup;
4931
4932 /* Enter fastest possible mode on all dies. */
4933 for (i = 0; i < chip->numchips; i++) {
4934 chip->select_chip(mtd, i);
4935 ret = nand_setup_data_interface(chip, i);
4936 chip->select_chip(mtd, -1);
4937
4938 if (ret)
4939 goto err_nand_data_iface_cleanup;
4940 }
4941
4942 /* Check if we should skip the bad block table scan */
4943 if (chip->options & NAND_SKIP_BBTSCAN)
4944 return 0;
4945
4946 /* Build bad block table */
4947 ret = chip->scan_bbt(mtd);
4948 if (ret)
4949 goto err_nand_data_iface_cleanup;
4950
4951 return 0;
4952
4953 err_nand_data_iface_cleanup:
4954 nand_release_data_interface(chip);
4955
4956 err_nand_manuf_cleanup:
4957 nand_manufacturer_cleanup(chip);
4958
4959 err_free_nbuf:
4960 if (nbuf) {
4961 kfree(nbuf->databuf);
4962 kfree(nbuf->ecccode);
4963 kfree(nbuf->ecccalc);
4964 kfree(nbuf);
4965 }
4966
4967 return ret;
4968 }
4969 EXPORT_SYMBOL(nand_scan_tail);
4970
4971 /*
4972 * is_module_text_address() isn't exported, and it's mostly a pointless
4973 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4974 * to call us from in-kernel code if the core NAND support is modular.
4975 */
4976 #ifdef MODULE
4977 #define caller_is_module() (1)
4978 #else
4979 #define caller_is_module() \
4980 is_module_text_address((unsigned long)__builtin_return_address(0))
4981 #endif
4982
4983 /**
4984 * nand_scan - [NAND Interface] Scan for the NAND device
4985 * @mtd: MTD device structure
4986 * @maxchips: number of chips to scan for
4987 *
4988 * This fills out all the uninitialized function pointers with the defaults.
4989 * The flash ID is read and the mtd/chip structures are filled with the
4990 * appropriate values.
4991 */
4992 int nand_scan(struct mtd_info *mtd, int maxchips)
4993 {
4994 int ret;
4995
4996 ret = nand_scan_ident(mtd, maxchips, NULL);
4997 if (!ret)
4998 ret = nand_scan_tail(mtd);
4999 return ret;
5000 }
5001 EXPORT_SYMBOL(nand_scan);
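
/*
 * Drivers that do not need to adjust anything between the two phases can
 * use this single call instead of the nand_scan_ident()/nand_scan_tail()
 * pair (sketch, with error handling as in the two-phase example above):
 *
 *	ret = nand_scan(mtd, 1);
 *	if (ret)
 *		return ret;
 *	return mtd_device_register(mtd, NULL, 0);
 */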
5002
5003 /**
5004 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5005 * @chip: NAND chip object
5006 */
5007 void nand_cleanup(struct nand_chip *chip)
5008 {
5009 if (chip->ecc.mode == NAND_ECC_SOFT &&
5010 chip->ecc.algo == NAND_ECC_BCH)
5011 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5012
5013 nand_release_data_interface(chip);
5014
5015 /* Free bad block table memory */
5016 kfree(chip->bbt);
5017 if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
5018 kfree(chip->buffers->databuf);
5019 kfree(chip->buffers->ecccode);
5020 kfree(chip->buffers->ecccalc);
5021 kfree(chip->buffers);
5022 }
5023
5024 /* Free bad block descriptor memory */
5025 if (chip->badblock_pattern && chip->badblock_pattern->options
5026 & NAND_BBT_DYNAMICSTRUCT)
5027 kfree(chip->badblock_pattern);
5028
5029 /* Free manufacturer priv data. */
5030 nand_manufacturer_cleanup(chip);
5031 }
5032 EXPORT_SYMBOL_GPL(nand_cleanup);
5033
5034 /**
5035 * nand_release - [NAND Interface] Unregister the MTD device and free resources
5036 * held by the NAND device
5037 * @mtd: MTD device structure
5038 */
5039 void nand_release(struct mtd_info *mtd)
5040 {
5041 mtd_device_unregister(mtd);
5042 nand_cleanup(mtd_to_nand(mtd));
5043 }
5044 EXPORT_SYMBOL_GPL(nand_release);
5045
5046 MODULE_LICENSE("GPL");
5047 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5048 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5049 MODULE_DESCRIPTION("Generic NAND flash driver code");