1 /*
2 * Overview:
3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
5 *
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
8 *
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
11 *
12 * Credits:
13 * David Woodhouse for adding multichip support
14 *
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
17 *
18 * TODO:
19 * Enable cached programming for 2k page size chips
20 * Check if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * The BBT is not serialized; this has to be fixed.
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
27 *
28 */
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/mm.h>
39 #include <linux/types.h>
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/nand.h>
42 #include <linux/mtd/nand_ecc.h>
43 #include <linux/mtd/nand_bch.h>
44 #include <linux/interrupt.h>
45 #include <linux/bitops.h>
46 #include <linux/io.h>
47 #include <linux/mtd/partitions.h>
48 #include <linux/of.h>
49
50 static int nand_get_device(struct mtd_info *mtd, int new_state);
51
52 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
53 struct mtd_oob_ops *ops);
54
55 /* Define default oob placement schemes for large and small page devices */
56 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
57 struct mtd_oob_region *oobregion)
58 {
59 struct nand_chip *chip = mtd_to_nand(mtd);
60 struct nand_ecc_ctrl *ecc = &chip->ecc;
61
62 if (section > 1)
63 return -ERANGE;
64
65 if (!section) {
66 oobregion->offset = 0;
67 oobregion->length = 4;
68 } else {
69 oobregion->offset = 6;
70 oobregion->length = ecc->total - 4;
71 }
72
73 return 0;
74 }
75
76 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
77 struct mtd_oob_region *oobregion)
78 {
79 if (section > 1)
80 return -ERANGE;
81
82 if (mtd->oobsize == 16) {
83 if (section)
84 return -ERANGE;
85
86 oobregion->length = 8;
87 oobregion->offset = 8;
88 } else {
89 oobregion->length = 2;
90 if (!section)
91 oobregion->offset = 3;
92 else
93 oobregion->offset = 6;
94 }
95
96 return 0;
97 }
98
99 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
100 .ecc = nand_ooblayout_ecc_sp,
101 .free = nand_ooblayout_free_sp,
102 };
103 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
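/*
 * Illustrative sketch (not part of this driver): a caller would normally
 * walk the layout exported above through the generic mtd_ooblayout_*
 * helpers rather than through the callbacks directly. The loop below is
 * only an example; it prints each ECC region of the small page layout
 * until the helper returns -ERANGE.
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, region.offset, region.length);
 *		section++;
 *	}
 */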
104
105 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
106 struct mtd_oob_region *oobregion)
107 {
108 struct nand_chip *chip = mtd_to_nand(mtd);
109 struct nand_ecc_ctrl *ecc = &chip->ecc;
110
111 if (section)
112 return -ERANGE;
113
114 oobregion->length = ecc->total;
115 oobregion->offset = mtd->oobsize - oobregion->length;
116
117 return 0;
118 }
119
120 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
121 struct mtd_oob_region *oobregion)
122 {
123 struct nand_chip *chip = mtd_to_nand(mtd);
124 struct nand_ecc_ctrl *ecc = &chip->ecc;
125
126 if (section)
127 return -ERANGE;
128
129 oobregion->length = mtd->oobsize - ecc->total - 2;
130 oobregion->offset = 2;
131
132 return 0;
133 }
134
135 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
136 .ecc = nand_ooblayout_ecc_lp,
137 .free = nand_ooblayout_free_lp,
138 };
139 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
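/*
 * Worked example for the large page layout above (figures are purely
 * illustrative): assuming a hypothetical chip with mtd->oobsize = 64 and
 * ecc->total = 24, the ECC region is reported at offset 64 - 24 = 40 with
 * length 24, and the free region at offset 2 with length 64 - 24 - 2 = 38.
 * Bytes 0 and 1 are left out of the free area; on large page devices they
 * typically hold the factory bad block marker.
 */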
140
141 static int check_offs_len(struct mtd_info *mtd,
142 loff_t ofs, uint64_t len)
143 {
144 struct nand_chip *chip = mtd_to_nand(mtd);
145 int ret = 0;
146
147 /* Start address must align on block boundary */
148 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
149 pr_debug("%s: unaligned address\n", __func__);
150 ret = -EINVAL;
151 }
152
153 /* Length must align on block boundary */
154 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
155 pr_debug("%s: length not block aligned\n", __func__);
156 ret = -EINVAL;
157 }
158
159 return ret;
160 }
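/*
 * Worked example of the alignment check above (illustrative only, assuming
 * a hypothetical chip with 128KiB erase blocks, i.e. phys_erase_shift = 17):
 * the mask (1ULL << 17) - 1 = 0x1ffff, so ofs = 0x40000 with len = 0x20000
 * passes, while ofs = 0x41000 or len = 0x10000 is rejected with -EINVAL.
 */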
161
162 /**
163 * nand_release_device - [GENERIC] release chip
164 * @mtd: MTD device structure
165 *
166 * Release chip lock and wake up anyone waiting on the device.
167 */
168 static void nand_release_device(struct mtd_info *mtd)
169 {
170 struct nand_chip *chip = mtd_to_nand(mtd);
171
172 /* Release the controller and the chip */
173 spin_lock(&chip->controller->lock);
174 chip->controller->active = NULL;
175 chip->state = FL_READY;
176 wake_up(&chip->controller->wq);
177 spin_unlock(&chip->controller->lock);
178 }
179
180 /**
181 * nand_read_byte - [DEFAULT] read one byte from the chip
182 * @mtd: MTD device structure
183 *
184 * Default read function for 8bit buswidth
185 */
186 static uint8_t nand_read_byte(struct mtd_info *mtd)
187 {
188 struct nand_chip *chip = mtd_to_nand(mtd);
189 return readb(chip->IO_ADDR_R);
190 }
191
192 /**
193 * nand_read_byte16 - [DEFAULT] read one byte, endianness-aware, from the chip
194 * @mtd: MTD device structure
195 *
196 * Default read function for 16bit buswidth with endianness conversion.
197 *
198 */
199 static uint8_t nand_read_byte16(struct mtd_info *mtd)
200 {
201 struct nand_chip *chip = mtd_to_nand(mtd);
202 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
203 }
204
205 /**
206 * nand_read_word - [DEFAULT] read one word from the chip
207 * @mtd: MTD device structure
208 *
209 * Default read function for 16bit buswidth without endianness conversion.
210 */
211 static u16 nand_read_word(struct mtd_info *mtd)
212 {
213 struct nand_chip *chip = mtd_to_nand(mtd);
214 return readw(chip->IO_ADDR_R);
215 }
216
217 /**
218 * nand_select_chip - [DEFAULT] control CE line
219 * @mtd: MTD device structure
220 * @chipnr: chip number to select, -1 for deselect
221 *
222 * Default select function for 1 chip devices.
223 */
224 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
225 {
226 struct nand_chip *chip = mtd_to_nand(mtd);
227
228 switch (chipnr) {
229 case -1:
230 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
231 break;
232 case 0:
233 break;
234
235 default:
236 BUG();
237 }
238 }
239
240 /**
241 * nand_write_byte - [DEFAULT] write single byte to chip
242 * @mtd: MTD device structure
243 * @byte: value to write
244 *
245 * Default function to write a byte to I/O[7:0]
246 */
247 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
248 {
249 struct nand_chip *chip = mtd_to_nand(mtd);
250
251 chip->write_buf(mtd, &byte, 1);
252 }
253
254 /**
255 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
256 * @mtd: MTD device structure
257 * @byte: value to write
258 *
259 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
260 */
261 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
262 {
263 struct nand_chip *chip = mtd_to_nand(mtd);
264 uint16_t word = byte;
265
266 /*
267 * It's not entirely clear what should happen to I/O[15:8] when writing
268 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
269 *
270 * When the host supports a 16-bit bus width, only data is
271 * transferred at the 16-bit width. All address and command line
272 * transfers shall use only the lower 8-bits of the data bus. During
273 * command transfers, the host may place any value on the upper
274 * 8-bits of the data bus. During address transfers, the host shall
275 * set the upper 8-bits of the data bus to 00h.
276 *
277 * One user of the write_byte callback is nand_onfi_set_features. The
278 * four parameters are specified to be written to I/O[7:0], but this is
279 * neither an address nor a command transfer. Let's assume a 0 on the
280 * upper I/O lines is OK.
281 */
282 chip->write_buf(mtd, (uint8_t *)&word, 2);
283 }
284
285 /**
286 * nand_write_buf - [DEFAULT] write buffer to chip
287 * @mtd: MTD device structure
288 * @buf: data buffer
289 * @len: number of bytes to write
290 *
291 * Default write function for 8bit buswidth.
292 */
293 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
294 {
295 struct nand_chip *chip = mtd_to_nand(mtd);
296
297 iowrite8_rep(chip->IO_ADDR_W, buf, len);
298 }
299
300 /**
301 * nand_read_buf - [DEFAULT] read chip data into buffer
302 * @mtd: MTD device structure
303 * @buf: buffer to store data
304 * @len: number of bytes to read
305 *
306 * Default read function for 8bit buswidth.
307 */
308 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
309 {
310 struct nand_chip *chip = mtd_to_nand(mtd);
311
312 ioread8_rep(chip->IO_ADDR_R, buf, len);
313 }
314
315 /**
316 * nand_write_buf16 - [DEFAULT] write buffer to chip
317 * @mtd: MTD device structure
318 * @buf: data buffer
319 * @len: number of bytes to write
320 *
321 * Default write function for 16bit buswidth.
322 */
323 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
324 {
325 struct nand_chip *chip = mtd_to_nand(mtd);
326 u16 *p = (u16 *) buf;
327
328 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
329 }
330
331 /**
332 * nand_read_buf16 - [DEFAULT] read chip data into buffer
333 * @mtd: MTD device structure
334 * @buf: buffer to store data
335 * @len: number of bytes to read
336 *
337 * Default read function for 16bit buswidth.
338 */
339 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
340 {
341 struct nand_chip *chip = mtd_to_nand(mtd);
342 u16 *p = (u16 *) buf;
343
344 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
345 }
346
347 /**
348 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
349 * @mtd: MTD device structure
350 * @ofs: offset from device start
351 *
352 * Check if the block is bad.
353 */
354 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
355 {
356 int page, res = 0, i = 0;
357 struct nand_chip *chip = mtd_to_nand(mtd);
358 u16 bad;
359
360 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
361 ofs += mtd->erasesize - mtd->writesize;
362
363 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
364
365 do {
366 if (chip->options & NAND_BUSWIDTH_16) {
367 chip->cmdfunc(mtd, NAND_CMD_READOOB,
368 chip->badblockpos & 0xFE, page);
369 bad = cpu_to_le16(chip->read_word(mtd));
370 if (chip->badblockpos & 0x1)
371 bad >>= 8;
372 else
373 bad &= 0xFF;
374 } else {
375 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
376 page);
377 bad = chip->read_byte(mtd);
378 }
379
380 if (likely(chip->badblockbits == 8))
381 res = bad != 0xFF;
382 else
383 res = hweight8(bad) < chip->badblockbits;
384 ofs += mtd->writesize;
385 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
386 i++;
387 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
388
389 return res;
390 }
391
392 /**
393 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
394 * @mtd: MTD device structure
395 * @ofs: offset from device start
396 *
397 * This is the default implementation, which can be overridden by a hardware
398 * specific driver. It provides the details for writing a bad block marker to a
399 * block.
400 */
401 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
402 {
403 struct nand_chip *chip = mtd_to_nand(mtd);
404 struct mtd_oob_ops ops;
405 uint8_t buf[2] = { 0, 0 };
406 int ret = 0, res, i = 0;
407
408 memset(&ops, 0, sizeof(ops));
409 ops.oobbuf = buf;
410 ops.ooboffs = chip->badblockpos;
411 if (chip->options & NAND_BUSWIDTH_16) {
412 ops.ooboffs &= ~0x01;
413 ops.len = ops.ooblen = 2;
414 } else {
415 ops.len = ops.ooblen = 1;
416 }
417 ops.mode = MTD_OPS_PLACE_OOB;
418
419 /* Write to first/last page(s) if necessary */
420 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
421 ofs += mtd->erasesize - mtd->writesize;
422 do {
423 res = nand_do_write_oob(mtd, ofs, &ops);
424 if (!ret)
425 ret = res;
426
427 i++;
428 ofs += mtd->writesize;
429 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
430
431 return ret;
432 }
433
434 /**
435 * nand_block_markbad_lowlevel - mark a block bad
436 * @mtd: MTD device structure
437 * @ofs: offset from device start
438 *
439 * This function performs the generic NAND bad block marking steps (i.e., bad
440 * block table(s) and/or marker(s)). We only allow the hardware driver to
441 * specify how to write bad block markers to OOB (chip->block_markbad).
442 *
443 * We try operations in the following order:
444 * (1) erase the affected block, to allow OOB marker to be written cleanly
445 * (2) write bad block marker to OOB area of affected block (unless flag
446 * NAND_BBT_NO_OOB_BBM is present)
447 * (3) update the BBT
448 * Note that we retain the first error encountered in (2) or (3), finish the
449 * procedures, and dump the error in the end.
450 */
451 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
452 {
453 struct nand_chip *chip = mtd_to_nand(mtd);
454 int res, ret = 0;
455
456 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
457 struct erase_info einfo;
458
459 /* Attempt erase before marking OOB */
460 memset(&einfo, 0, sizeof(einfo));
461 einfo.mtd = mtd;
462 einfo.addr = ofs;
463 einfo.len = 1ULL << chip->phys_erase_shift;
464 nand_erase_nand(mtd, &einfo, 0);
465
466 /* Write bad block marker to OOB */
467 nand_get_device(mtd, FL_WRITING);
468 ret = chip->block_markbad(mtd, ofs);
469 nand_release_device(mtd);
470 }
471
472 /* Mark block bad in BBT */
473 if (chip->bbt) {
474 res = nand_markbad_bbt(mtd, ofs);
475 if (!ret)
476 ret = res;
477 }
478
479 if (!ret)
480 mtd->ecc_stats.badblocks++;
481
482 return ret;
483 }
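/*
 * Usage sketch (illustrative only): MTD users do not call the low-level
 * helper above directly; they go through the generic MTD API, which for
 * this driver eventually ends up here.
 *
 *	err = mtd_block_markbad(mtd, ofs);
 *	if (err)
 *		pr_warn("marking block at 0x%llx bad failed: %d\n",
 *			(unsigned long long)ofs, err);
 */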
484
485 /**
486 * nand_check_wp - [GENERIC] check if the chip is write protected
487 * @mtd: MTD device structure
488 *
489 * Check if the device is write protected. The function expects that the
490 * device is already selected.
491 */
492 static int nand_check_wp(struct mtd_info *mtd)
493 {
494 struct nand_chip *chip = mtd_to_nand(mtd);
495
496 /* Broken xD cards report WP despite being writable */
497 if (chip->options & NAND_BROKEN_XD)
498 return 0;
499
500 /* Check the WP bit */
501 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
502 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
503 }
504
505 /**
506 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
507 * @mtd: MTD device structure
508 * @ofs: offset from device start
509 *
510 * Check if the block is marked as reserved.
511 */
512 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
513 {
514 struct nand_chip *chip = mtd_to_nand(mtd);
515
516 if (!chip->bbt)
517 return 0;
518 /* Return info from the table */
519 return nand_isreserved_bbt(mtd, ofs);
520 }
521
522 /**
523 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
524 * @mtd: MTD device structure
525 * @ofs: offset from device start
526 * @allowbbt: 1 if it is allowed to access the BBT area
527 *
528 * Check if the block is bad, either by reading the bad block table or
529 * by calling the scan function.
530 */
531 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
532 {
533 struct nand_chip *chip = mtd_to_nand(mtd);
534
535 if (!chip->bbt)
536 return chip->block_bad(mtd, ofs);
537
538 /* Return info from the table */
539 return nand_isbad_bbt(mtd, ofs, allowbbt);
540 }
541
542 /**
543 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
544 * @mtd: MTD device structure
545 * @timeo: Timeout
546 *
547 * Helper function for nand_wait_ready used when needing to wait in interrupt
548 * context.
549 */
550 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
551 {
552 struct nand_chip *chip = mtd_to_nand(mtd);
553 int i;
554
555 /* Wait for the device to get ready */
556 for (i = 0; i < timeo; i++) {
557 if (chip->dev_ready(mtd))
558 break;
559 touch_softlockup_watchdog();
560 mdelay(1);
561 }
562 }
563
564 /**
565 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
566 * @mtd: MTD device structure
567 *
568 * Wait for the ready pin after a command, and warn if a timeout occurs.
569 */
570 void nand_wait_ready(struct mtd_info *mtd)
571 {
572 struct nand_chip *chip = mtd_to_nand(mtd);
573 unsigned long timeo = 400;
574
575 if (in_interrupt() || oops_in_progress)
576 return panic_nand_wait_ready(mtd, timeo);
577
578 /* Wait until command is processed or timeout occurs */
579 timeo = jiffies + msecs_to_jiffies(timeo);
580 do {
581 if (chip->dev_ready(mtd))
582 return;
583 cond_resched();
584 } while (time_before(jiffies, timeo));
585
586 if (!chip->dev_ready(mtd))
587 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
588 }
589 EXPORT_SYMBOL_GPL(nand_wait_ready);
590
591 /**
592 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
593 * @mtd: MTD device structure
594 * @timeo: Timeout in ms
595 *
596 * Wait for status ready (i.e. command done) or timeout.
597 */
598 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
599 {
600 register struct nand_chip *chip = mtd_to_nand(mtd);
601
602 timeo = jiffies + msecs_to_jiffies(timeo);
603 do {
604 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
605 break;
606 touch_softlockup_watchdog();
607 } while (time_before(jiffies, timeo));
608 }
609
610 /**
611 * nand_command - [DEFAULT] Send command to NAND device
612 * @mtd: MTD device structure
613 * @command: the command to be sent
614 * @column: the column address for this command, -1 if none
615 * @page_addr: the page address for this command, -1 if none
616 *
617 * Send command to NAND device. This function is used for small page devices
618 * (512 Bytes per page).
619 */
620 static void nand_command(struct mtd_info *mtd, unsigned int command,
621 int column, int page_addr)
622 {
623 register struct nand_chip *chip = mtd_to_nand(mtd);
624 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
625
626 /* Write out the command to the device */
627 if (command == NAND_CMD_SEQIN) {
628 int readcmd;
629
630 if (column >= mtd->writesize) {
631 /* OOB area */
632 column -= mtd->writesize;
633 readcmd = NAND_CMD_READOOB;
634 } else if (column < 256) {
635 /* First 256 bytes --> READ0 */
636 readcmd = NAND_CMD_READ0;
637 } else {
638 column -= 256;
639 readcmd = NAND_CMD_READ1;
640 }
641 chip->cmd_ctrl(mtd, readcmd, ctrl);
642 ctrl &= ~NAND_CTRL_CHANGE;
643 }
644 chip->cmd_ctrl(mtd, command, ctrl);
645
646 /* Address cycle, when necessary */
647 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
648 /* Serially input address */
649 if (column != -1) {
650 /* Adjust columns for 16 bit buswidth */
651 if (chip->options & NAND_BUSWIDTH_16 &&
652 !nand_opcode_8bits(command))
653 column >>= 1;
654 chip->cmd_ctrl(mtd, column, ctrl);
655 ctrl &= ~NAND_CTRL_CHANGE;
656 }
657 if (page_addr != -1) {
658 chip->cmd_ctrl(mtd, page_addr, ctrl);
659 ctrl &= ~NAND_CTRL_CHANGE;
660 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
661 /* One more address cycle for devices > 32MiB */
662 if (chip->chipsize > (32 << 20))
663 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
664 }
665 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
666
667 /*
668 * Program and erase have their own busy handlers; status and
669 * sequential-in need no delay.
670 */
671 switch (command) {
672
673 case NAND_CMD_PAGEPROG:
674 case NAND_CMD_ERASE1:
675 case NAND_CMD_ERASE2:
676 case NAND_CMD_SEQIN:
677 case NAND_CMD_STATUS:
678 return;
679
680 case NAND_CMD_RESET:
681 if (chip->dev_ready)
682 break;
683 udelay(chip->chip_delay);
684 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
685 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
686 chip->cmd_ctrl(mtd,
687 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
688 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
689 nand_wait_status_ready(mtd, 250);
690 return;
691
692 /* This applies to read commands */
693 default:
694 /*
695 * If we don't have access to the busy pin, we apply the given
696 * command delay
697 */
698 if (!chip->dev_ready) {
699 udelay(chip->chip_delay);
700 return;
701 }
702 }
703 /*
704 * Apply this short delay always to ensure that we do wait tWB in
705 * any case on any machine.
706 */
707 ndelay(100);
708
709 nand_wait_ready(mtd);
710 }
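/*
 * Worked example of the cycle sequence produced above (illustrative only,
 * assuming a hypothetical 512-byte page chip larger than 32MiB on an 8-bit
 * bus): nand_command(mtd, NAND_CMD_READ0, 0, page) issues
 *
 *	cmd_ctrl(NAND_CMD_READ0, CLE)	command latch
 *	cmd_ctrl(0x00, ALE)		column address
 *	cmd_ctrl(page, ALE)		page address, low byte
 *	cmd_ctrl(page >> 8, ALE)	page address, mid byte
 *	cmd_ctrl(page >> 16, ALE)	extra cycle for chips > 32MiB
 *	cmd_ctrl(NAND_CMD_NONE, NCE)	end of the address phase
 *
 * followed by the 100ns tWB delay and nand_wait_ready(), or by the
 * chip_delay fallback when no ready pin is available.
 */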
711
712 static void nand_ccs_delay(struct nand_chip *chip)
713 {
714 /*
715 * The controller already takes care of waiting for tCCS when the RNDIN
716 * or RNDOUT command is sent, return directly.
717 */
718 if (!(chip->options & NAND_WAIT_TCCS))
719 return;
720
721 /*
722 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
723 * (which should be safe for all NANDs).
724 */
725 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
726 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
727 else
728 ndelay(500);
729 }
730
731 /**
732 * nand_command_lp - [DEFAULT] Send command to NAND large page device
733 * @mtd: MTD device structure
734 * @command: the command to be sent
735 * @column: the column address for this command, -1 if none
736 * @page_addr: the page address for this command, -1 if none
737 *
738 * Send command to NAND device. This is the version for the new large page
739 * devices. We don't have the separate regions as we have in the small page
740 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
741 */
742 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
743 int column, int page_addr)
744 {
745 register struct nand_chip *chip = mtd_to_nand(mtd);
746
747 /* Emulate NAND_CMD_READOOB */
748 if (command == NAND_CMD_READOOB) {
749 column += mtd->writesize;
750 command = NAND_CMD_READ0;
751 }
752
753 /* Command latch cycle */
754 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
755
756 if (column != -1 || page_addr != -1) {
757 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
758
759 /* Serially input address */
760 if (column != -1) {
761 /* Adjust columns for 16 bit buswidth */
762 if (chip->options & NAND_BUSWIDTH_16 &&
763 !nand_opcode_8bits(command))
764 column >>= 1;
765 chip->cmd_ctrl(mtd, column, ctrl);
766 ctrl &= ~NAND_CTRL_CHANGE;
767
768 /* Only output a single addr cycle for 8bits opcodes. */
769 if (!nand_opcode_8bits(command))
770 chip->cmd_ctrl(mtd, column >> 8, ctrl);
771 }
772 if (page_addr != -1) {
773 chip->cmd_ctrl(mtd, page_addr, ctrl);
774 chip->cmd_ctrl(mtd, page_addr >> 8,
775 NAND_NCE | NAND_ALE);
776 /* One more address cycle for devices > 128MiB */
777 if (chip->chipsize > (128 << 20))
778 chip->cmd_ctrl(mtd, page_addr >> 16,
779 NAND_NCE | NAND_ALE);
780 }
781 }
782 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
783
784 /*
785 * Program and erase have their own busy handlers; status and
786 * sequential-in need no delay.
787 */
788 switch (command) {
789
790 case NAND_CMD_CACHEDPROG:
791 case NAND_CMD_PAGEPROG:
792 case NAND_CMD_ERASE1:
793 case NAND_CMD_ERASE2:
794 case NAND_CMD_SEQIN:
795 case NAND_CMD_STATUS:
796 return;
797
798 case NAND_CMD_RNDIN:
799 nand_ccs_delay(chip);
800 return;
801
802 case NAND_CMD_RESET:
803 if (chip->dev_ready)
804 break;
805 udelay(chip->chip_delay);
806 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
807 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
808 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
809 NAND_NCE | NAND_CTRL_CHANGE);
810 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
811 nand_wait_status_ready(mtd, 250);
812 return;
813
814 case NAND_CMD_RNDOUT:
815 /* No ready / busy check necessary */
816 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
817 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
818 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
819 NAND_NCE | NAND_CTRL_CHANGE);
820
821 nand_ccs_delay(chip);
822 return;
823
824 case NAND_CMD_READ0:
825 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
826 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
827 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
828 NAND_NCE | NAND_CTRL_CHANGE);
829
830 /* This applies to read commands */
831 default:
832 /*
833 * If we don't have access to the busy pin, we apply the given
834 * command delay.
835 */
836 if (!chip->dev_ready) {
837 udelay(chip->chip_delay);
838 return;
839 }
840 }
841
842 /*
843 * Apply this short delay always to ensure that we do wait tWB in
844 * any case on any machine.
845 */
846 ndelay(100);
847
848 nand_wait_ready(mtd);
849 }
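/*
 * Worked example (illustrative only, assuming a hypothetical 2KiB page chip
 * larger than 128MiB on an 8-bit bus): nand_command_lp(mtd, NAND_CMD_READ0,
 * 0, page) issues
 *
 *	cmd_ctrl(NAND_CMD_READ0, NCE | CLE)	command latch
 *	cmd_ctrl(0x00, NCE | ALE)		column, low byte
 *	cmd_ctrl(0x00, NCE | ALE)		column, high byte
 *	cmd_ctrl(page, NCE | ALE)		page address, low byte
 *	cmd_ctrl(page >> 8, NCE | ALE)		page address, mid byte
 *	cmd_ctrl(page >> 16, NCE | ALE)		extra cycle for chips > 128MiB
 *	cmd_ctrl(NAND_CMD_NONE, NCE)		end of the address phase
 *	cmd_ctrl(NAND_CMD_READSTART, NCE | CLE)	second command latch
 *	cmd_ctrl(NAND_CMD_NONE, NCE)
 *
 * and then waits tWB and calls nand_wait_ready(), as in the small page case.
 */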
850
851 /**
852 * panic_nand_get_device - [GENERIC] Get chip for selected access
853 * @chip: the nand chip descriptor
854 * @mtd: MTD device structure
855 * @new_state: the state which is requested
856 *
857 * Used when in panic, no locks are taken.
858 */
859 static void panic_nand_get_device(struct nand_chip *chip,
860 struct mtd_info *mtd, int new_state)
861 {
862 /* Hardware controller shared among independent devices */
863 chip->controller->active = chip;
864 chip->state = new_state;
865 }
866
867 /**
868 * nand_get_device - [GENERIC] Get chip for selected access
869 * @mtd: MTD device structure
870 * @new_state: the state which is requested
871 *
872 * Get the device and lock it for exclusive access
873 */
874 static int
875 nand_get_device(struct mtd_info *mtd, int new_state)
876 {
877 struct nand_chip *chip = mtd_to_nand(mtd);
878 spinlock_t *lock = &chip->controller->lock;
879 wait_queue_head_t *wq = &chip->controller->wq;
880 DECLARE_WAITQUEUE(wait, current);
881 retry:
882 spin_lock(lock);
883
884 /* Hardware controller shared among independent devices */
885 if (!chip->controller->active)
886 chip->controller->active = chip;
887
888 if (chip->controller->active == chip && chip->state == FL_READY) {
889 chip->state = new_state;
890 spin_unlock(lock);
891 return 0;
892 }
893 if (new_state == FL_PM_SUSPENDED) {
894 if (chip->controller->active->state == FL_PM_SUSPENDED) {
895 chip->state = FL_PM_SUSPENDED;
896 spin_unlock(lock);
897 return 0;
898 }
899 }
900 set_current_state(TASK_UNINTERRUPTIBLE);
901 add_wait_queue(wq, &wait);
902 spin_unlock(lock);
903 schedule();
904 remove_wait_queue(wq, &wait);
905 goto retry;
906 }
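/*
 * Sketch of the locking pattern used throughout this file (illustrative
 * only): every operation brackets its chip accesses between
 * nand_get_device() and nand_release_device(), as done above when writing
 * a bad block marker:
 *
 *	nand_get_device(mtd, FL_WRITING);
 *	ret = chip->block_markbad(mtd, ofs);
 *	nand_release_device(mtd);
 *
 * nand_get_device() sleeps until the shared controller is free and the chip
 * state is FL_READY; nand_release_device() wakes the next waiter on
 * controller->wq.
 */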
907
908 /**
909 * panic_nand_wait - [GENERIC] wait until the command is done
910 * @mtd: MTD device structure
911 * @chip: NAND chip structure
912 * @timeo: timeout
913 *
914 * Wait for command done. This is a helper function for nand_wait used when
915 * we are in interrupt context. May happen when in panic and trying to write
916 * an oops through mtdoops.
917 */
918 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
919 unsigned long timeo)
920 {
921 int i;
922 for (i = 0; i < timeo; i++) {
923 if (chip->dev_ready) {
924 if (chip->dev_ready(mtd))
925 break;
926 } else {
927 if (chip->read_byte(mtd) & NAND_STATUS_READY)
928 break;
929 }
930 mdelay(1);
931 }
932 }
933
934 /**
935 * nand_wait - [DEFAULT] wait until the command is done
936 * @mtd: MTD device structure
937 * @chip: NAND chip structure
938 *
939 * Wait for command done. This applies to erase and program only.
940 */
941 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
942 {
943
944 int status;
945 unsigned long timeo = 400;
946
947 /*
948 * Apply this short delay always to ensure that we do wait tWB in any
949 * case on any machine.
950 */
951 ndelay(100);
952
953 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
954
955 if (in_interrupt() || oops_in_progress)
956 panic_nand_wait(mtd, chip, timeo);
957 else {
958 timeo = jiffies + msecs_to_jiffies(timeo);
959 do {
960 if (chip->dev_ready) {
961 if (chip->dev_ready(mtd))
962 break;
963 } else {
964 if (chip->read_byte(mtd) & NAND_STATUS_READY)
965 break;
966 }
967 cond_resched();
968 } while (time_before(jiffies, timeo));
969 }
970
971 status = (int)chip->read_byte(mtd);
972 /* This can happen in case of a timeout or a buggy dev_ready */
973 WARN_ON(!(status & NAND_STATUS_READY));
974 return status;
975 }
976
977 /**
978 * nand_reset_data_interface - Reset data interface and timings
979 * @chip: The NAND chip
980 *
981 * Reset the Data interface and timings to ONFI mode 0.
982 *
983 * Returns 0 for success or negative error code otherwise.
984 */
985 static int nand_reset_data_interface(struct nand_chip *chip)
986 {
987 struct mtd_info *mtd = nand_to_mtd(chip);
988 const struct nand_data_interface *conf;
989 int ret;
990
991 if (!chip->setup_data_interface)
992 return 0;
993
994 /*
995 * The ONFI specification says:
996 * "
997 * To transition from NV-DDR or NV-DDR2 to the SDR data
998 * interface, the host shall use the Reset (FFh) command
999 * using SDR timing mode 0. A device in any timing mode is
1000 * required to recognize Reset (FFh) command issued in SDR
1001 * timing mode 0.
1002 * "
1003 *
1004 * Configure the data interface in SDR mode and set the
1005 * timings to timing mode 0.
1006 */
1007
1008 conf = nand_get_default_data_interface();
1009 ret = chip->setup_data_interface(mtd, conf, false);
1010 if (ret)
1011 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1012
1013 return ret;
1014 }
1015
1016 /**
1017 * nand_setup_data_interface - Setup the best data interface and timings
1018 * @chip: The NAND chip
1019 *
1020 * Find and configure the best data interface and NAND timings supported by
1021 * the chip and the driver.
1022 * First tries to retrieve supported timing modes from ONFI information,
1023 * and if the NAND chip does not support ONFI, relies on the
1024 * ->onfi_timing_mode_default specified in the nand_ids table.
1025 *
1026 * Returns 0 for success or negative error code otherwise.
1027 */
1028 static int nand_setup_data_interface(struct nand_chip *chip)
1029 {
1030 struct mtd_info *mtd = nand_to_mtd(chip);
1031 int ret;
1032
1033 if (!chip->setup_data_interface || !chip->data_interface)
1034 return 0;
1035
1036 /*
1037 * Ensure the timing mode has been changed on the chip side
1038 * before changing timings on the controller side.
1039 */
1040 if (chip->onfi_version) {
1041 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1042 chip->onfi_timing_mode_default,
1043 };
1044
1045 ret = chip->onfi_set_features(mtd, chip,
1046 ONFI_FEATURE_ADDR_TIMING_MODE,
1047 tmode_param);
1048 if (ret)
1049 goto err;
1050 }
1051
1052 ret = chip->setup_data_interface(mtd, chip->data_interface, false);
1053 err:
1054 return ret;
1055 }
1056
1057 /**
1058 * nand_init_data_interface - find the best data interface and timings
1059 * @chip: The NAND chip
1060 *
1061 * Find the best data interface and NAND timings supported by the chip
1062 * and the driver.
1063 * First tries to retrieve supported timing modes from ONFI information,
1064 * and if the NAND chip does not support ONFI, relies on the
1065 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1066 * function nand_chip->data_interface is initialized with the best timing mode
1067 * available.
1068 *
1069 * Returns 0 for success or negative error code otherwise.
1070 */
1071 static int nand_init_data_interface(struct nand_chip *chip)
1072 {
1073 struct mtd_info *mtd = nand_to_mtd(chip);
1074 int modes, mode, ret;
1075
1076 if (!chip->setup_data_interface)
1077 return 0;
1078
1079 /*
1080 * First try to identify the best timings from ONFI parameters and
1081 * if the NAND does not support ONFI, fallback to the default ONFI
1082 * timing mode.
1083 */
1084 modes = onfi_get_async_timing_mode(chip);
1085 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1086 if (!chip->onfi_timing_mode_default)
1087 return 0;
1088
1089 modes = GENMASK(chip->onfi_timing_mode_default, 0);
1090 }
1091
1092 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1093 GFP_KERNEL);
1094 if (!chip->data_interface)
1095 return -ENOMEM;
1096
1097 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1098 ret = onfi_init_data_interface(chip, chip->data_interface,
1099 NAND_SDR_IFACE, mode);
1100 if (ret)
1101 continue;
1102
1103 ret = chip->setup_data_interface(mtd, chip->data_interface,
1104 true);
1105 if (!ret) {
1106 chip->onfi_timing_mode_default = mode;
1107 break;
1108 }
1109 }
1110
1111 return 0;
1112 }
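/*
 * Worked example of the mode selection above (illustrative only): if the
 * ONFI parameter page reports asynchronous timing modes 0-4,
 * onfi_get_async_timing_mode() returns the bitmask 0x1f, fls(0x1f) - 1 = 4,
 * so mode 4 is tried first; if ->setup_data_interface() rejects it, the
 * loop falls back to mode 3 and so on down to mode 0. For a non-ONFI chip
 * with onfi_timing_mode_default = 3, GENMASK(3, 0) = 0xf yields the same
 * behaviour starting from mode 3.
 */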
1113
1114 static void nand_release_data_interface(struct nand_chip *chip)
1115 {
1116 kfree(chip->data_interface);
1117 }
1118
1119 /**
1120 * nand_reset - Reset and initialize a NAND device
1121 * @chip: The NAND chip
1122 * @chipnr: Internal die id
1123 *
1124 * Returns 0 for success or negative error code otherwise
1125 */
1126 int nand_reset(struct nand_chip *chip, int chipnr)
1127 {
1128 struct mtd_info *mtd = nand_to_mtd(chip);
1129 int ret;
1130
1131 ret = nand_reset_data_interface(chip);
1132 if (ret)
1133 return ret;
1134
1135 /*
1136 * The CS line has to be released before we can apply the new NAND
1137 * interface settings, hence this weird ->select_chip() dance.
1138 */
1139 chip->select_chip(mtd, chipnr);
1140 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1141 chip->select_chip(mtd, -1);
1142
1143 chip->select_chip(mtd, chipnr);
1144 ret = nand_setup_data_interface(chip);
1145 chip->select_chip(mtd, -1);
1146 if (ret)
1147 return ret;
1148
1149 return 0;
1150 }
1151
1152 /**
1153 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1154 * @mtd: mtd info
1155 * @ofs: offset to start unlock from
1156 * @len: length to unlock
1157 * @invert: when = 0, unlock the range of blocks within the lower and
1158 * upper boundary address
1159 * when = 1, unlock the range of blocks outside the boundaries
1160 * of the lower and upper boundary address
1161 *
1162 * Returns unlock status.
1163 */
1164 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1165 uint64_t len, int invert)
1166 {
1167 int ret = 0;
1168 int status, page;
1169 struct nand_chip *chip = mtd_to_nand(mtd);
1170
1171 /* Submit address of first page to unlock */
1172 page = ofs >> chip->page_shift;
1173 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1174
1175 /* Submit address of last page to unlock */
1176 page = (ofs + len) >> chip->page_shift;
1177 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1178 (page | invert) & chip->pagemask);
1179
1180 /* Call wait ready function */
1181 status = chip->waitfunc(mtd, chip);
1182 /* See if device thinks it succeeded */
1183 if (status & NAND_STATUS_FAIL) {
1184 pr_debug("%s: error status = 0x%08x\n",
1185 __func__, status);
1186 ret = -EIO;
1187 }
1188
1189 return ret;
1190 }
1191
1192 /**
1193 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1194 * @mtd: mtd info
1195 * @ofs: offset to start unlock from
1196 * @len: length to unlock
1197 *
1198 * Returns unlock status.
1199 */
1200 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1201 {
1202 int ret = 0;
1203 int chipnr;
1204 struct nand_chip *chip = mtd_to_nand(mtd);
1205
1206 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1207 __func__, (unsigned long long)ofs, len);
1208
1209 if (check_offs_len(mtd, ofs, len))
1210 return -EINVAL;
1211
1212 /* Align to the last block address if the range reaches the end of the device */
1213 if (ofs + len == mtd->size)
1214 len -= mtd->erasesize;
1215
1216 nand_get_device(mtd, FL_UNLOCKING);
1217
1218 /* Shift to get chip number */
1219 chipnr = ofs >> chip->chip_shift;
1220
1221 /*
1222 * Reset the chip.
1223 * If we want to check the WP bit through READ STATUS (bit 7 of the
1224 * status register), we must reset the chip first, because some
1225 * operations, e.g. erasing or programming a locked block, can also
1226 * clear bit 7 of the status register.
1227 */
1228 nand_reset(chip, chipnr);
1229
1230 chip->select_chip(mtd, chipnr);
1231
1232 /* Check if it is write protected */
1233 if (nand_check_wp(mtd)) {
1234 pr_debug("%s: device is write protected!\n",
1235 __func__);
1236 ret = -EIO;
1237 goto out;
1238 }
1239
1240 ret = __nand_unlock(mtd, ofs, len, 0);
1241
1242 out:
1243 chip->select_chip(mtd, -1);
1244 nand_release_device(mtd);
1245
1246 return ret;
1247 }
1248 EXPORT_SYMBOL(nand_unlock);
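/*
 * Usage sketch (illustrative only, hypothetical offsets): both ofs and len
 * must be erase block aligned, as enforced by check_offs_len(). Unlocking
 * the first 16 blocks of a chip could look like this:
 *
 *	err = nand_unlock(mtd, 0, 16ULL * mtd->erasesize);
 *	if (err)
 *		pr_err("nand_unlock failed: %d\n", err);
 */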
1249
1250 /**
1251 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1252 * @mtd: mtd info
1253 * @ofs: offset to start lock from
1254 * @len: length to lock
1255 *
1256 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1257 * have this feature, but it only allows locking all blocks, not a specified
1258 * range of blocks. The 'lock' feature is implemented by making use of
1259 * 'unlock', for now.
1260 *
1261 * Returns lock status.
1262 */
1263 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1264 {
1265 int ret = 0;
1266 int chipnr, status, page;
1267 struct nand_chip *chip = mtd_to_nand(mtd);
1268
1269 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1270 __func__, (unsigned long long)ofs, len);
1271
1272 if (check_offs_len(mtd, ofs, len))
1273 return -EINVAL;
1274
1275 nand_get_device(mtd, FL_LOCKING);
1276
1277 /* Shift to get chip number */
1278 chipnr = ofs >> chip->chip_shift;
1279
1280 /*
1281 * Reset the chip.
1282 * If we want to check the WP bit through READ STATUS (bit 7 of the
1283 * status register), we must reset the chip first, because some
1284 * operations, e.g. erasing or programming a locked block, can also
1285 * clear bit 7 of the status register.
1286 */
1287 nand_reset(chip, chipnr);
1288
1289 chip->select_chip(mtd, chipnr);
1290
1291 /* Check if it is write protected */
1292 if (nand_check_wp(mtd)) {
1293 pr_debug("%s: device is write protected!\n",
1294 __func__);
1295 status = MTD_ERASE_FAILED;
1296 ret = -EIO;
1297 goto out;
1298 }
1299
1300 /* Submit address of first page to lock */
1301 page = ofs >> chip->page_shift;
1302 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1303
1304 /* Call wait ready function */
1305 status = chip->waitfunc(mtd, chip);
1306 /* See if device thinks it succeeded */
1307 if (status & NAND_STATUS_FAIL) {
1308 pr_debug("%s: error status = 0x%08x\n",
1309 __func__, status);
1310 ret = -EIO;
1311 goto out;
1312 }
1313
1314 ret = __nand_unlock(mtd, ofs, len, 0x1);
1315
1316 out:
1317 chip->select_chip(mtd, -1);
1318 nand_release_device(mtd);
1319
1320 return ret;
1321 }
1322 EXPORT_SYMBOL(nand_lock);
1323
1324 /**
1325 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1326 * @buf: buffer to test
1327 * @len: buffer length
1328 * @bitflips_threshold: maximum number of bitflips
1329 *
1330 * Check if a buffer contains only 0xff, which means the underlying region
1331 * has been erased and is ready to be programmed.
1332 * The bitflips_threshold specifies the maximum number of bitflips before
1333 * considering the region as not erased.
1334 * Note: The logic of this function has been extracted from the memweight
1335 * implementation, except that nand_check_erased_buf() exits before
1336 * testing the whole buffer if the number of bitflips exceeds the
1337 * bitflips_threshold value.
1338 *
1339 * Returns a positive number of bitflips less than or equal to
1340 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1341 * threshold.
1342 */
1343 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1344 {
1345 const unsigned char *bitmap = buf;
1346 int bitflips = 0;
1347 int weight;
1348
1349 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1350 len--, bitmap++) {
1351 weight = hweight8(*bitmap);
1352 bitflips += BITS_PER_BYTE - weight;
1353 if (unlikely(bitflips > bitflips_threshold))
1354 return -EBADMSG;
1355 }
1356
1357 for (; len >= sizeof(long);
1358 len -= sizeof(long), bitmap += sizeof(long)) {
1359 weight = hweight_long(*((unsigned long *)bitmap));
1360 bitflips += BITS_PER_LONG - weight;
1361 if (unlikely(bitflips > bitflips_threshold))
1362 return -EBADMSG;
1363 }
1364
1365 for (; len > 0; len--, bitmap++) {
1366 weight = hweight8(*bitmap);
1367 bitflips += BITS_PER_BYTE - weight;
1368 if (unlikely(bitflips > bitflips_threshold))
1369 return -EBADMSG;
1370 }
1371
1372 return bitflips;
1373 }
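/*
 * Worked example (illustrative only): for a 512-byte buffer that is all
 * 0xff except for three bits programmed to 0, the loops above count three
 * bitflips. With bitflips_threshold >= 3 the function returns 3 and the
 * region may still be treated as erased; with bitflips_threshold < 3 it
 * bails out early with -EBADMSG.
 */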
1374
1375 /**
1376 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1377 * 0xff data
1378 * @data: data buffer to test
1379 * @datalen: data length
1380 * @ecc: ECC buffer
1381 * @ecclen: ECC length
1382 * @extraoob: extra OOB buffer
1383 * @extraooblen: extra OOB length
1384 * @bitflips_threshold: maximum number of bitflips
1385 *
1386 * Check if a data buffer and its associated ECC and OOB data contains only
1387 * 0xff pattern, which means the underlying region has been erased and is
1388 * ready to be programmed.
1389 * The bitflips_threshold specifies the maximum number of bitflips before
1390 * considering the region as not erased.
1391 *
1392 * Note:
1393 * 1/ ECC algorithms work on pre-defined block sizes which are usually
1394 * different from the NAND page size. When fixing bitflips, ECC engines will
1395 * report the number of errors per chunk, and the NAND core infrastructure
1396 * expects you to return the maximum number of bitflips for the whole page.
1397 * This is why you should always use this function on a single chunk and
1398 * not on the whole page. After checking each chunk you should update your
1399 * max_bitflips value accordingly.
1400 * 2/ When checking for bitflips in erased pages you should not only check
1401 * the payload data but also the associated ECC data, because a user might
1402 * have programmed all but a few bits to 1. In this case, we shouldn't
1403 * consider the chunk as erased, and checking the ECC bytes prevents
1404 * this case.
1405 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1406 * data are protected by the ECC engine.
1407 * It could also be used if you support subpages and want to attach some
1408 * extra OOB data to an ECC chunk.
1409 *
1410 * Returns a positive number of bitflips less than or equal to
1411 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1412 * threshold. In case of success, the passed buffers are filled with 0xff.
1413 */
1414 int nand_check_erased_ecc_chunk(void *data, int datalen,
1415 void *ecc, int ecclen,
1416 void *extraoob, int extraooblen,
1417 int bitflips_threshold)
1418 {
1419 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1420
1421 data_bitflips = nand_check_erased_buf(data, datalen,
1422 bitflips_threshold);
1423 if (data_bitflips < 0)
1424 return data_bitflips;
1425
1426 bitflips_threshold -= data_bitflips;
1427
1428 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1429 if (ecc_bitflips < 0)
1430 return ecc_bitflips;
1431
1432 bitflips_threshold -= ecc_bitflips;
1433
1434 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1435 bitflips_threshold);
1436 if (extraoob_bitflips < 0)
1437 return extraoob_bitflips;
1438
1439 if (data_bitflips)
1440 memset(data, 0xff, datalen);
1441
1442 if (ecc_bitflips)
1443 memset(ecc, 0xff, ecclen);
1444
1445 if (extraoob_bitflips)
1446 memset(extraoob, 0xff, extraooblen);
1447
1448 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1449 }
1450 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
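/*
 * Usage sketch (illustrative only): a typical ->read_page() implementation
 * calls this helper on a single ECC chunk when the ECC engine reports an
 * uncorrectable error, exactly as the hardware ECC readers below do:
 *
 *	stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
 *	if (stat == -EBADMSG &&
 *	    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK))
 *		stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
 *						   &ecc_code[i],
 *						   chip->ecc.bytes,
 *						   NULL, 0,
 *						   chip->ecc.strength);
 *	if (stat < 0)
 *		mtd->ecc_stats.failed++;
 *	else
 *		max_bitflips = max_t(unsigned int, max_bitflips, stat);
 */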
1451
1452 /**
1453 * nand_read_page_raw - [INTERN] read raw page data without ecc
1454 * @mtd: mtd info structure
1455 * @chip: nand chip info structure
1456 * @buf: buffer to store read data
1457 * @oob_required: caller requires OOB data read to chip->oob_poi
1458 * @page: page number to read
1459 *
1460 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1461 */
1462 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1463 uint8_t *buf, int oob_required, int page)
1464 {
1465 chip->read_buf(mtd, buf, mtd->writesize);
1466 if (oob_required)
1467 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1468 return 0;
1469 }
1470
1471 /**
1472 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1473 * @mtd: mtd info structure
1474 * @chip: nand chip info structure
1475 * @buf: buffer to store read data
1476 * @oob_required: caller requires OOB data read to chip->oob_poi
1477 * @page: page number to read
1478 *
1479 * We need a special oob layout and handling even when OOB isn't used.
1480 */
1481 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1482 struct nand_chip *chip, uint8_t *buf,
1483 int oob_required, int page)
1484 {
1485 int eccsize = chip->ecc.size;
1486 int eccbytes = chip->ecc.bytes;
1487 uint8_t *oob = chip->oob_poi;
1488 int steps, size;
1489
1490 for (steps = chip->ecc.steps; steps > 0; steps--) {
1491 chip->read_buf(mtd, buf, eccsize);
1492 buf += eccsize;
1493
1494 if (chip->ecc.prepad) {
1495 chip->read_buf(mtd, oob, chip->ecc.prepad);
1496 oob += chip->ecc.prepad;
1497 }
1498
1499 chip->read_buf(mtd, oob, eccbytes);
1500 oob += eccbytes;
1501
1502 if (chip->ecc.postpad) {
1503 chip->read_buf(mtd, oob, chip->ecc.postpad);
1504 oob += chip->ecc.postpad;
1505 }
1506 }
1507
1508 size = mtd->oobsize - (oob - chip->oob_poi);
1509 if (size)
1510 chip->read_buf(mtd, oob, size);
1511
1512 return 0;
1513 }
1514
1515 /**
1516 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1517 * @mtd: mtd info structure
1518 * @chip: nand chip info structure
1519 * @buf: buffer to store read data
1520 * @oob_required: caller requires OOB data read to chip->oob_poi
1521 * @page: page number to read
1522 */
1523 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1524 uint8_t *buf, int oob_required, int page)
1525 {
1526 int i, eccsize = chip->ecc.size, ret;
1527 int eccbytes = chip->ecc.bytes;
1528 int eccsteps = chip->ecc.steps;
1529 uint8_t *p = buf;
1530 uint8_t *ecc_calc = chip->buffers->ecccalc;
1531 uint8_t *ecc_code = chip->buffers->ecccode;
1532 unsigned int max_bitflips = 0;
1533
1534 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1535
1536 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1537 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1538
1539 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1540 chip->ecc.total);
1541 if (ret)
1542 return ret;
1543
1544 eccsteps = chip->ecc.steps;
1545 p = buf;
1546
1547 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1548 int stat;
1549
1550 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1551 if (stat < 0) {
1552 mtd->ecc_stats.failed++;
1553 } else {
1554 mtd->ecc_stats.corrected += stat;
1555 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1556 }
1557 }
1558 return max_bitflips;
1559 }
1560
1561 /**
1562 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1563 * @mtd: mtd info structure
1564 * @chip: nand chip info structure
1565 * @data_offs: offset of requested data within the page
1566 * @readlen: data length
1567 * @bufpoi: buffer to store read data
1568 * @page: page number to read
1569 */
1570 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1571 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1572 int page)
1573 {
1574 int start_step, end_step, num_steps, ret;
1575 uint8_t *p;
1576 int data_col_addr, i, gaps = 0;
1577 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1578 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1579 int index, section = 0;
1580 unsigned int max_bitflips = 0;
1581 struct mtd_oob_region oobregion = { };
1582
1583 /* Column address within the page, aligned to the ECC step size */
1584 start_step = data_offs / chip->ecc.size;
1585 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1586 num_steps = end_step - start_step + 1;
1587 index = start_step * chip->ecc.bytes;
1588
1589 /* Data size aligned to ecc.size */
1590 datafrag_len = num_steps * chip->ecc.size;
1591 eccfrag_len = num_steps * chip->ecc.bytes;
1592
1593 data_col_addr = start_step * chip->ecc.size;
1594 /* If we are not reading page-aligned data */
1595 if (data_col_addr != 0)
1596 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1597
1598 p = bufpoi + data_col_addr;
1599 chip->read_buf(mtd, p, datafrag_len);
1600
1601 /* Calculate ECC */
1602 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1603 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1604
1605 /*
1606 * Performance is better if we position offsets according to
1607 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1608 */
1609 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1610 if (ret)
1611 return ret;
1612
1613 if (oobregion.length < eccfrag_len)
1614 gaps = 1;
1615
1616 if (gaps) {
1617 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1618 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1619 } else {
1620 /*
1621 * Send the command to read the particular ECC bytes; take care
1622 * of buswidth alignment in read_buf.
1623 */
1624 aligned_pos = oobregion.offset & ~(busw - 1);
1625 aligned_len = eccfrag_len;
1626 if (oobregion.offset & (busw - 1))
1627 aligned_len++;
1628 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1629 (busw - 1))
1630 aligned_len++;
1631
1632 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1633 mtd->writesize + aligned_pos, -1);
1634 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1635 }
1636
1637 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1638 chip->oob_poi, index, eccfrag_len);
1639 if (ret)
1640 return ret;
1641
1642 p = bufpoi + data_col_addr;
1643 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1644 int stat;
1645
1646 stat = chip->ecc.correct(mtd, p,
1647 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1648 if (stat == -EBADMSG &&
1649 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1650 /* check for empty pages with bitflips */
1651 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1652 &chip->buffers->ecccode[i],
1653 chip->ecc.bytes,
1654 NULL, 0,
1655 chip->ecc.strength);
1656 }
1657
1658 if (stat < 0) {
1659 mtd->ecc_stats.failed++;
1660 } else {
1661 mtd->ecc_stats.corrected += stat;
1662 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1663 }
1664 }
1665 return max_bitflips;
1666 }
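/*
 * Worked example of the fragment math above (illustrative only, assuming a
 * hypothetical configuration with chip->ecc.size = 512 and
 * chip->ecc.bytes = 7): a request with data_offs = 1000 and readlen = 100
 * gives start_step = 1000 / 512 = 1 and end_step = 1099 / 512 = 2, hence
 * num_steps = 2, datafrag_len = 1024 bytes read from column
 * data_col_addr = 512, eccfrag_len = 14 ECC bytes, and index = 7 as the
 * starting ECC byte offset within the OOB layout.
 */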
1667
1668 /**
1669 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1670 * @mtd: mtd info structure
1671 * @chip: nand chip info structure
1672 * @buf: buffer to store read data
1673 * @oob_required: caller requires OOB data read to chip->oob_poi
1674 * @page: page number to read
1675 *
1676 * Not for syndrome calculating ECC controllers which need a special oob layout.
1677 */
1678 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1679 uint8_t *buf, int oob_required, int page)
1680 {
1681 int i, eccsize = chip->ecc.size, ret;
1682 int eccbytes = chip->ecc.bytes;
1683 int eccsteps = chip->ecc.steps;
1684 uint8_t *p = buf;
1685 uint8_t *ecc_calc = chip->buffers->ecccalc;
1686 uint8_t *ecc_code = chip->buffers->ecccode;
1687 unsigned int max_bitflips = 0;
1688
1689 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1690 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1691 chip->read_buf(mtd, p, eccsize);
1692 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1693 }
1694 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1695
1696 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1697 chip->ecc.total);
1698 if (ret)
1699 return ret;
1700
1701 eccsteps = chip->ecc.steps;
1702 p = buf;
1703
1704 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1705 int stat;
1706
1707 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1708 if (stat == -EBADMSG &&
1709 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1710 /* check for empty pages with bitflips */
1711 stat = nand_check_erased_ecc_chunk(p, eccsize,
1712 &ecc_code[i], eccbytes,
1713 NULL, 0,
1714 chip->ecc.strength);
1715 }
1716
1717 if (stat < 0) {
1718 mtd->ecc_stats.failed++;
1719 } else {
1720 mtd->ecc_stats.corrected += stat;
1721 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1722 }
1723 }
1724 return max_bitflips;
1725 }
1726
1727 /**
1728 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1729 * @mtd: mtd info structure
1730 * @chip: nand chip info structure
1731 * @buf: buffer to store read data
1732 * @oob_required: caller requires OOB data read to chip->oob_poi
1733 * @page: page number to read
1734 *
1735 * Hardware ECC for large page chips, which requires the OOB to be read
1736 * first. For this ECC mode, the write_page method is re-used from ECC_HW.
1737 * These methods read/write ECC from the OOB area, unlike the
1738 * ECC_HW_SYNDROME support which, with multiple ECC steps, follows the
1739 * "infix ECC" scheme and reads/writes ECC from the data area, overwriting the manufacturer's bad block markers.
1740 */
1741 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1742 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1743 {
1744 int i, eccsize = chip->ecc.size, ret;
1745 int eccbytes = chip->ecc.bytes;
1746 int eccsteps = chip->ecc.steps;
1747 uint8_t *p = buf;
1748 uint8_t *ecc_code = chip->buffers->ecccode;
1749 uint8_t *ecc_calc = chip->buffers->ecccalc;
1750 unsigned int max_bitflips = 0;
1751
1752 /* Read the OOB area first */
1753 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1754 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1755 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1756
1757 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1758 chip->ecc.total);
1759 if (ret)
1760 return ret;
1761
1762 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1763 int stat;
1764
1765 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1766 chip->read_buf(mtd, p, eccsize);
1767 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1768
1769 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1770 if (stat == -EBADMSG &&
1771 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1772 /* check for empty pages with bitflips */
1773 stat = nand_check_erased_ecc_chunk(p, eccsize,
1774 &ecc_code[i], eccbytes,
1775 NULL, 0,
1776 chip->ecc.strength);
1777 }
1778
1779 if (stat < 0) {
1780 mtd->ecc_stats.failed++;
1781 } else {
1782 mtd->ecc_stats.corrected += stat;
1783 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1784 }
1785 }
1786 return max_bitflips;
1787 }
1788
1789 /**
1790 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1791 * @mtd: mtd info structure
1792 * @chip: nand chip info structure
1793 * @buf: buffer to store read data
1794 * @oob_required: caller requires OOB data read to chip->oob_poi
1795 * @page: page number to read
1796 *
1797 * The hw generator calculates the error syndrome automatically. Therefore we
1798 * need a special oob layout and handling.
1799 */
1800 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1801 uint8_t *buf, int oob_required, int page)
1802 {
1803 int i, eccsize = chip->ecc.size;
1804 int eccbytes = chip->ecc.bytes;
1805 int eccsteps = chip->ecc.steps;
1806 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1807 uint8_t *p = buf;
1808 uint8_t *oob = chip->oob_poi;
1809 unsigned int max_bitflips = 0;
1810
1811 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1812 int stat;
1813
1814 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1815 chip->read_buf(mtd, p, eccsize);
1816
1817 if (chip->ecc.prepad) {
1818 chip->read_buf(mtd, oob, chip->ecc.prepad);
1819 oob += chip->ecc.prepad;
1820 }
1821
1822 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1823 chip->read_buf(mtd, oob, eccbytes);
1824 stat = chip->ecc.correct(mtd, p, oob, NULL);
1825
1826 oob += eccbytes;
1827
1828 if (chip->ecc.postpad) {
1829 chip->read_buf(mtd, oob, chip->ecc.postpad);
1830 oob += chip->ecc.postpad;
1831 }
1832
1833 if (stat == -EBADMSG &&
1834 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1835 /* check for empty pages with bitflips */
1836 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1837 oob - eccpadbytes,
1838 eccpadbytes,
1839 NULL, 0,
1840 chip->ecc.strength);
1841 }
1842
1843 if (stat < 0) {
1844 mtd->ecc_stats.failed++;
1845 } else {
1846 mtd->ecc_stats.corrected += stat;
1847 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1848 }
1849 }
1850
1851 /* Calculate remaining oob bytes */
1852 i = mtd->oobsize - (oob - chip->oob_poi);
1853 if (i)
1854 chip->read_buf(mtd, oob, i);
1855
1856 return max_bitflips;
1857 }
1858
1859 /**
1860 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1861 * @mtd: mtd info structure
1862 * @oob: oob destination address
1863 * @ops: oob ops structure
1864 * @len: size of oob to transfer
1865 */
1866 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1867 struct mtd_oob_ops *ops, size_t len)
1868 {
1869 struct nand_chip *chip = mtd_to_nand(mtd);
1870 int ret;
1871
1872 switch (ops->mode) {
1873
1874 case MTD_OPS_PLACE_OOB:
1875 case MTD_OPS_RAW:
1876 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1877 return oob + len;
1878
1879 case MTD_OPS_AUTO_OOB:
1880 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1881 ops->ooboffs, len);
1882 BUG_ON(ret);
1883 return oob + len;
1884
1885 default:
1886 BUG();
1887 }
1888 return NULL;
1889 }
1890
1891 /**
1892 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1893 * @mtd: MTD device structure
1894 * @retry_mode: the retry mode to use
1895 *
1896 * Some vendors supply a special command to shift the Vt threshold, to be used
1897 * when there are too many bitflips in a page (i.e., ECC error). After setting
1898 * a new threshold, the host should retry reading the page.
1899 */
1900 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1901 {
1902 struct nand_chip *chip = mtd_to_nand(mtd);
1903
1904 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1905
1906 if (retry_mode >= chip->read_retries)
1907 return -EINVAL;
1908
1909 if (!chip->setup_read_retry)
1910 return -EOPNOTSUPP;
1911
1912 return chip->setup_read_retry(mtd, retry_mode);
1913 }
1914
1915 /**
1916 * nand_do_read_ops - [INTERN] Read data with ECC
1917 * @mtd: MTD device structure
1918 * @from: offset to read from
1919 * @ops: oob ops structure
1920 *
1921 * Internal function. Called with chip held.
1922 */
1923 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1924 struct mtd_oob_ops *ops)
1925 {
1926 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1927 struct nand_chip *chip = mtd_to_nand(mtd);
1928 int ret = 0;
1929 uint32_t readlen = ops->len;
1930 uint32_t oobreadlen = ops->ooblen;
1931 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
1932
1933 uint8_t *bufpoi, *oob, *buf;
1934 int use_bufpoi;
1935 unsigned int max_bitflips = 0;
1936 int retry_mode = 0;
1937 bool ecc_fail = false;
1938
1939 chipnr = (int)(from >> chip->chip_shift);
1940 chip->select_chip(mtd, chipnr);
1941
1942 realpage = (int)(from >> chip->page_shift);
1943 page = realpage & chip->pagemask;
1944
1945 col = (int)(from & (mtd->writesize - 1));
1946
1947 buf = ops->datbuf;
1948 oob = ops->oobbuf;
1949 oob_required = oob ? 1 : 0;
1950
1951 while (1) {
1952 unsigned int ecc_failures = mtd->ecc_stats.failed;
1953
1954 bytes = min(mtd->writesize - col, readlen);
1955 aligned = (bytes == mtd->writesize);
1956
1957 if (!aligned)
1958 use_bufpoi = 1;
1959 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
1960 use_bufpoi = !virt_addr_valid(buf);
1961 else
1962 use_bufpoi = 0;
1963
1964 /* Is the current page in the buffer? */
1965 if (realpage != chip->pagebuf || oob) {
1966 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
1967
1968 if (use_bufpoi && aligned)
1969 pr_debug("%s: using read bounce buffer for buf@%p\n",
1970 __func__, buf);
1971
1972 read_retry:
1973 if (nand_standard_page_accessors(&chip->ecc))
1974 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1975
1976 /*
1977 * Now read the page into the buffer. Absent an error,
1978 * the read methods return max bitflips per ecc step.
1979 */
1980 if (unlikely(ops->mode == MTD_OPS_RAW))
1981 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1982 oob_required,
1983 page);
1984 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
1985 !oob)
1986 ret = chip->ecc.read_subpage(mtd, chip,
1987 col, bytes, bufpoi,
1988 page);
1989 else
1990 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1991 oob_required, page);
1992 if (ret < 0) {
1993 if (use_bufpoi)
1994 /* Invalidate page cache */
1995 chip->pagebuf = -1;
1996 break;
1997 }
1998
1999 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2000
2001 /* Transfer unaligned data */
2002 if (use_bufpoi) {
2003 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2004 !(mtd->ecc_stats.failed - ecc_failures) &&
2005 (ops->mode != MTD_OPS_RAW)) {
2006 chip->pagebuf = realpage;
2007 chip->pagebuf_bitflips = ret;
2008 } else {
2009 /* Invalidate page cache */
2010 chip->pagebuf = -1;
2011 }
2012 memcpy(buf, chip->buffers->databuf + col, bytes);
2013 }
2014
2015 if (unlikely(oob)) {
2016 int toread = min(oobreadlen, max_oobsize);
2017
2018 if (toread) {
2019 oob = nand_transfer_oob(mtd,
2020 oob, ops, toread);
2021 oobreadlen -= toread;
2022 }
2023 }
2024
2025 if (chip->options & NAND_NEED_READRDY) {
2026 /* Apply delay or wait for ready/busy pin */
2027 if (!chip->dev_ready)
2028 udelay(chip->chip_delay);
2029 else
2030 nand_wait_ready(mtd);
2031 }
2032
2033 if (mtd->ecc_stats.failed - ecc_failures) {
2034 if (retry_mode + 1 < chip->read_retries) {
2035 retry_mode++;
2036 ret = nand_setup_read_retry(mtd,
2037 retry_mode);
2038 if (ret < 0)
2039 break;
2040
2041 /* Reset failures; retry */
2042 mtd->ecc_stats.failed = ecc_failures;
2043 goto read_retry;
2044 } else {
2045 /* No more retry modes; real failure */
2046 ecc_fail = true;
2047 }
2048 }
2049
2050 buf += bytes;
2051 } else {
2052 memcpy(buf, chip->buffers->databuf + col, bytes);
2053 buf += bytes;
2054 max_bitflips = max_t(unsigned int, max_bitflips,
2055 chip->pagebuf_bitflips);
2056 }
2057
2058 readlen -= bytes;
2059
2060 /* Reset to retry mode 0 */
2061 if (retry_mode) {
2062 ret = nand_setup_read_retry(mtd, 0);
2063 if (ret < 0)
2064 break;
2065 retry_mode = 0;
2066 }
2067
2068 if (!readlen)
2069 break;
2070
2071 /* For subsequent reads align to page boundary */
2072 col = 0;
2073 /* Increment page address */
2074 realpage++;
2075
2076 page = realpage & chip->pagemask;
2077 /* Check if we cross a chip boundary */
2078 if (!page) {
2079 chipnr++;
2080 chip->select_chip(mtd, -1);
2081 chip->select_chip(mtd, chipnr);
2082 }
2083 }
2084 chip->select_chip(mtd, -1);
2085
2086 ops->retlen = ops->len - (size_t) readlen;
2087 if (oob)
2088 ops->oobretlen = ops->ooblen - oobreadlen;
2089
2090 if (ret < 0)
2091 return ret;
2092
2093 if (ecc_fail)
2094 return -EBADMSG;
2095
2096 return max_bitflips;
2097 }
2098
2099 /**
2100 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
2101 * @mtd: MTD device structure
2102 * @from: offset to read from
2103 * @len: number of bytes to read
2104 * @retlen: pointer to variable to store the number of read bytes
2105 * @buf: the databuffer to put data
2106 *
2107 * Get hold of the chip and call nand_do_read_ops().
2108 */
2109 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2110 size_t *retlen, uint8_t *buf)
2111 {
2112 struct mtd_oob_ops ops;
2113 int ret;
2114
2115 nand_get_device(mtd, FL_READING);
2116 memset(&ops, 0, sizeof(ops));
2117 ops.len = len;
2118 ops.datbuf = buf;
2119 ops.mode = MTD_OPS_PLACE_OOB;
2120 ret = nand_do_read_ops(mtd, from, &ops);
2121 *retlen = ops.retlen;
2122 nand_release_device(mtd);
2123 return ret;
2124 }
2125
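/*
 * MTD users do not call nand_read() directly; they go through mtd_read(),
 * which turns the bitflip count returned above into -EUCLEAN once it
 * reaches mtd->bitflip_threshold. A minimal sketch of such a caller
 * (example_read_page() is a hypothetical helper, not part of this driver):
 *
 *	static int example_read_page(struct mtd_info *mtd, loff_t ofs, u8 *buf)
 *	{
 *		size_t retlen;
 *		int ret = mtd_read(mtd, ofs, mtd->writesize, &retlen, buf);
 *
 *		if (ret == -EUCLEAN)
 *			pr_warn("correctable bitflips, consider scrubbing\n");
 *		return (ret < 0 && ret != -EUCLEAN) ? ret : 0;
 *	}
 */
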
2126 /**
2127 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2128 * @mtd: mtd info structure
2129 * @chip: nand chip info structure
2130 * @page: page number to read
2131 */
2132 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2133 {
2134 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2135 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2136 return 0;
2137 }
2138 EXPORT_SYMBOL(nand_read_oob_std);
2139
2140 /**
2141 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2142 * with syndromes
2143 * @mtd: mtd info structure
2144 * @chip: nand chip info structure
2145 * @page: page number to read
2146 */
2147 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2148 int page)
2149 {
2150 int length = mtd->oobsize;
2151 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2152 int eccsize = chip->ecc.size;
2153 uint8_t *bufpoi = chip->oob_poi;
2154 int i, toread, sndrnd = 0, pos;
2155
2156 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2157 for (i = 0; i < chip->ecc.steps; i++) {
2158 if (sndrnd) {
2159 pos = eccsize + i * (eccsize + chunk);
2160 if (mtd->writesize > 512)
2161 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2162 else
2163 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2164 } else
2165 sndrnd = 1;
2166 toread = min_t(int, length, chunk);
2167 chip->read_buf(mtd, bufpoi, toread);
2168 bufpoi += toread;
2169 length -= toread;
2170 }
2171 if (length > 0)
2172 chip->read_buf(mtd, bufpoi, length);
2173
2174 return 0;
2175 }
2176 EXPORT_SYMBOL(nand_read_oob_syndrome);
2177
2178 /**
2179 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2180 * @mtd: mtd info structure
2181 * @chip: nand chip info structure
2182 * @page: page number to write
2183 */
2184 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2185 {
2186 int status = 0;
2187 const uint8_t *buf = chip->oob_poi;
2188 int length = mtd->oobsize;
2189
2190 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2191 chip->write_buf(mtd, buf, length);
2192 /* Send command to program the OOB data */
2193 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2194
2195 status = chip->waitfunc(mtd, chip);
2196
2197 return status & NAND_STATUS_FAIL ? -EIO : 0;
2198 }
2199 EXPORT_SYMBOL(nand_write_oob_std);
2200
2201 /**
2202 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2203 * with syndrome - only for large page flash
2204 * @mtd: mtd info structure
2205 * @chip: nand chip info structure
2206 * @page: page number to write
2207 */
2208 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2209 int page)
2210 {
2211 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2212 int eccsize = chip->ecc.size, length = mtd->oobsize;
2213 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2214 const uint8_t *bufpoi = chip->oob_poi;
2215
2216 /*
2217 * data-ecc-data-ecc ... ecc-oob
2218 * or
2219 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2220 */
2221 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2222 pos = steps * (eccsize + chunk);
2223 steps = 0;
2224 } else
2225 pos = eccsize;
2226
2227 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2228 for (i = 0; i < steps; i++) {
2229 if (sndcmd) {
2230 if (mtd->writesize <= 512) {
2231 uint32_t fill = 0xFFFFFFFF;
2232
2233 len = eccsize;
2234 while (len > 0) {
2235 int num = min_t(int, len, 4);
2236 chip->write_buf(mtd, (uint8_t *)&fill,
2237 num);
2238 len -= num;
2239 }
2240 } else {
2241 pos = eccsize + i * (eccsize + chunk);
2242 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2243 }
2244 } else
2245 sndcmd = 1;
2246 len = min_t(int, length, chunk);
2247 chip->write_buf(mtd, bufpoi, len);
2248 bufpoi += len;
2249 length -= len;
2250 }
2251 if (length > 0)
2252 chip->write_buf(mtd, bufpoi, length);
2253
2254 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2255 status = chip->waitfunc(mtd, chip);
2256
2257 return status & NAND_STATUS_FAIL ? -EIO : 0;
2258 }
2259 EXPORT_SYMBOL(nand_write_oob_syndrome);
2260
2261 /**
2262 * nand_do_read_oob - [INTERN] NAND read out-of-band
2263 * @mtd: MTD device structure
2264 * @from: offset to read from
2265 * @ops: oob operations description structure
2266 *
2267 * NAND read out-of-band data from the spare area.
2268 */
2269 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2270 struct mtd_oob_ops *ops)
2271 {
2272 int page, realpage, chipnr;
2273 struct nand_chip *chip = mtd_to_nand(mtd);
2274 struct mtd_ecc_stats stats;
2275 int readlen = ops->ooblen;
2276 int len;
2277 uint8_t *buf = ops->oobbuf;
2278 int ret = 0;
2279
2280 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2281 __func__, (unsigned long long)from, readlen);
2282
2283 stats = mtd->ecc_stats;
2284
2285 len = mtd_oobavail(mtd, ops);
2286
2287 if (unlikely(ops->ooboffs >= len)) {
2288 pr_debug("%s: attempt to start read outside oob\n",
2289 __func__);
2290 return -EINVAL;
2291 }
2292
2293 /* Do not allow reads past end of device */
2294 if (unlikely(from >= mtd->size ||
2295 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2296 (from >> chip->page_shift)) * len)) {
2297 pr_debug("%s: attempt to read beyond end of device\n",
2298 __func__);
2299 return -EINVAL;
2300 }
2301
2302 chipnr = (int)(from >> chip->chip_shift);
2303 chip->select_chip(mtd, chipnr);
2304
2305 /* Shift to get page */
2306 realpage = (int)(from >> chip->page_shift);
2307 page = realpage & chip->pagemask;
2308
2309 while (1) {
2310 if (ops->mode == MTD_OPS_RAW)
2311 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2312 else
2313 ret = chip->ecc.read_oob(mtd, chip, page);
2314
2315 if (ret < 0)
2316 break;
2317
2318 len = min(len, readlen);
2319 buf = nand_transfer_oob(mtd, buf, ops, len);
2320
2321 if (chip->options & NAND_NEED_READRDY) {
2322 /* Apply delay or wait for ready/busy pin */
2323 if (!chip->dev_ready)
2324 udelay(chip->chip_delay);
2325 else
2326 nand_wait_ready(mtd);
2327 }
2328
2329 readlen -= len;
2330 if (!readlen)
2331 break;
2332
2333 /* Increment page address */
2334 realpage++;
2335
2336 page = realpage & chip->pagemask;
2337 /* Check if we cross a chip boundary */
2338 if (!page) {
2339 chipnr++;
2340 chip->select_chip(mtd, -1);
2341 chip->select_chip(mtd, chipnr);
2342 }
2343 }
2344 chip->select_chip(mtd, -1);
2345
2346 ops->oobretlen = ops->ooblen - readlen;
2347
2348 if (ret < 0)
2349 return ret;
2350
2351 if (mtd->ecc_stats.failed - stats.failed)
2352 return -EBADMSG;
2353
2354 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2355 }
2356
2357 /**
2358 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2359 * @mtd: MTD device structure
2360 * @from: offset to read from
2361 * @ops: oob operation description structure
2362 *
2363 * NAND read data and/or out-of-band data.
2364 */
2365 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2366 struct mtd_oob_ops *ops)
2367 {
2368 int ret;
2369
2370 ops->retlen = 0;
2371
2372 /* Do not allow reads past end of device */
2373 if (ops->datbuf && (from + ops->len) > mtd->size) {
2374 pr_debug("%s: attempt to read beyond end of device\n",
2375 __func__);
2376 return -EINVAL;
2377 }
2378
2379 if (ops->mode != MTD_OPS_PLACE_OOB &&
2380 ops->mode != MTD_OPS_AUTO_OOB &&
2381 ops->mode != MTD_OPS_RAW)
2382 return -ENOTSUPP;
2383
2384 nand_get_device(mtd, FL_READING);
2385
2386 if (!ops->datbuf)
2387 ret = nand_do_read_oob(mtd, from, ops);
2388 else
2389 ret = nand_do_read_ops(mtd, from, ops);
2390
2391 nand_release_device(mtd);
2392 return ret;
2393 }
2394
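/*
 * A caller that wants page data and the free OOB bytes in one pass fills a
 * struct mtd_oob_ops and uses mtd_read_oob(); with a non-NULL datbuf the
 * request is serviced by nand_do_read_ops() above. A minimal sketch
 * (databuf, oobbuf and page_offset are hypothetical):
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.datbuf = databuf;
 *	ops.len = mtd->writesize;
 *	ops.oobbuf = oobbuf;
 *	ops.ooblen = mtd->oobavail;
 *	ret = mtd_read_oob(mtd, page_offset, &ops);
 */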
2395
2396 /**
2397 * nand_write_page_raw - [INTERN] raw page write function
2398 * @mtd: mtd info structure
2399 * @chip: nand chip info structure
2400 * @buf: data buffer
2401 * @oob_required: must write chip->oob_poi to OOB
2402 * @page: page number to write
2403 *
2404 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2405 */
2406 static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2407 const uint8_t *buf, int oob_required, int page)
2408 {
2409 chip->write_buf(mtd, buf, mtd->writesize);
2410 if (oob_required)
2411 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2412
2413 return 0;
2414 }
2415
2416 /**
2417 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2418 * @mtd: mtd info structure
2419 * @chip: nand chip info structure
2420 * @buf: data buffer
2421 * @oob_required: must write chip->oob_poi to OOB
2422 * @page: page number to write
2423 *
2424 * We need a special oob layout and handling even when ECC isn't checked.
2425 */
2426 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2427 struct nand_chip *chip,
2428 const uint8_t *buf, int oob_required,
2429 int page)
2430 {
2431 int eccsize = chip->ecc.size;
2432 int eccbytes = chip->ecc.bytes;
2433 uint8_t *oob = chip->oob_poi;
2434 int steps, size;
2435
2436 for (steps = chip->ecc.steps; steps > 0; steps--) {
2437 chip->write_buf(mtd, buf, eccsize);
2438 buf += eccsize;
2439
2440 if (chip->ecc.prepad) {
2441 chip->write_buf(mtd, oob, chip->ecc.prepad);
2442 oob += chip->ecc.prepad;
2443 }
2444
2445 chip->write_buf(mtd, oob, eccbytes);
2446 oob += eccbytes;
2447
2448 if (chip->ecc.postpad) {
2449 chip->write_buf(mtd, oob, chip->ecc.postpad);
2450 oob += chip->ecc.postpad;
2451 }
2452 }
2453
2454 size = mtd->oobsize - (oob - chip->oob_poi);
2455 if (size)
2456 chip->write_buf(mtd, oob, size);
2457
2458 return 0;
2459 }
2460 /**
2461 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2462 * @mtd: mtd info structure
2463 * @chip: nand chip info structure
2464 * @buf: data buffer
2465 * @oob_required: must write chip->oob_poi to OOB
2466 * @page: page number to write
2467 */
2468 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2469 const uint8_t *buf, int oob_required,
2470 int page)
2471 {
2472 int i, eccsize = chip->ecc.size, ret;
2473 int eccbytes = chip->ecc.bytes;
2474 int eccsteps = chip->ecc.steps;
2475 uint8_t *ecc_calc = chip->buffers->ecccalc;
2476 const uint8_t *p = buf;
2477
2478 /* Software ECC calculation */
2479 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2480 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2481
2482 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2483 chip->ecc.total);
2484 if (ret)
2485 return ret;
2486
2487 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2488 }
2489
2490 /**
2491 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2492 * @mtd: mtd info structure
2493 * @chip: nand chip info structure
2494 * @buf: data buffer
2495 * @oob_required: must write chip->oob_poi to OOB
2496 * @page: page number to write
2497 */
2498 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2499 const uint8_t *buf, int oob_required,
2500 int page)
2501 {
2502 int i, eccsize = chip->ecc.size, ret;
2503 int eccbytes = chip->ecc.bytes;
2504 int eccsteps = chip->ecc.steps;
2505 uint8_t *ecc_calc = chip->buffers->ecccalc;
2506 const uint8_t *p = buf;
2507
2508 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2509 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2510 chip->write_buf(mtd, p, eccsize);
2511 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2512 }
2513
2514 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2515 chip->ecc.total);
2516 if (ret)
2517 return ret;
2518
2519 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2520
2521 return 0;
2522 }
2523
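/*
 * The ECC byte positions consumed by mtd_ooblayout_set_eccbytes() above are
 * described by the chip's mtd_ooblayout_ops. A minimal sketch of walking
 * that layout, e.g. for debugging (purely illustrative, not used here):
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section++, &region))
 *		pr_info("ECC bytes at OOB offset %u, length %u\n",
 *			region.offset, region.length);
 */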
2524
2525 /**
2526 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2527 * @mtd: mtd info structure
2528 * @chip: nand chip info structure
2529 * @offset: column address of subpage within the page
2530 * @data_len: data length
2531 * @buf: data buffer
2532 * @oob_required: must write chip->oob_poi to OOB
2533 * @page: page number to write
2534 */
2535 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2536 struct nand_chip *chip, uint32_t offset,
2537 uint32_t data_len, const uint8_t *buf,
2538 int oob_required, int page)
2539 {
2540 uint8_t *oob_buf = chip->oob_poi;
2541 uint8_t *ecc_calc = chip->buffers->ecccalc;
2542 int ecc_size = chip->ecc.size;
2543 int ecc_bytes = chip->ecc.bytes;
2544 int ecc_steps = chip->ecc.steps;
2545 uint32_t start_step = offset / ecc_size;
2546 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2547 int oob_bytes = mtd->oobsize / ecc_steps;
2548 int step, ret;
2549
2550 for (step = 0; step < ecc_steps; step++) {
2551 /* configure controller for WRITE access */
2552 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2553
2554 /* write data (untouched subpages already masked by 0xFF) */
2555 chip->write_buf(mtd, buf, ecc_size);
2556
2557 /* mask ECC of un-touched subpages by padding 0xFF */
2558 if ((step < start_step) || (step > end_step))
2559 memset(ecc_calc, 0xff, ecc_bytes);
2560 else
2561 chip->ecc.calculate(mtd, buf, ecc_calc);
2562
2563 /* mask OOB of un-touched subpages by padding 0xFF */
2564 /* if oob_required, preserve OOB metadata of written subpage */
2565 if (!oob_required || (step < start_step) || (step > end_step))
2566 memset(oob_buf, 0xff, oob_bytes);
2567
2568 buf += ecc_size;
2569 ecc_calc += ecc_bytes;
2570 oob_buf += oob_bytes;
2571 }
2572
2573 /* copy the calculated ECC for the whole page to chip->oob_poi */
2574 /* this includes the masked value (0xFF) for unwritten subpages */
2575 ecc_calc = chip->buffers->ecccalc;
2576 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2577 chip->ecc.total);
2578 if (ret)
2579 return ret;
2580
2581 /* write OOB buffer to NAND device */
2582 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2583
2584 return 0;
2585 }
2586
2587
2588 /**
2589 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2590 * @mtd: mtd info structure
2591 * @chip: nand chip info structure
2592 * @buf: data buffer
2593 * @oob_required: must write chip->oob_poi to OOB
2594 * @page: page number to write
2595 *
2596 * The hw generator calculates the error syndrome automatically. Therefore we
2597 * need a special oob layout and handling.
2598 */
2599 static int nand_write_page_syndrome(struct mtd_info *mtd,
2600 struct nand_chip *chip,
2601 const uint8_t *buf, int oob_required,
2602 int page)
2603 {
2604 int i, eccsize = chip->ecc.size;
2605 int eccbytes = chip->ecc.bytes;
2606 int eccsteps = chip->ecc.steps;
2607 const uint8_t *p = buf;
2608 uint8_t *oob = chip->oob_poi;
2609
2610 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2611
2612 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2613 chip->write_buf(mtd, p, eccsize);
2614
2615 if (chip->ecc.prepad) {
2616 chip->write_buf(mtd, oob, chip->ecc.prepad);
2617 oob += chip->ecc.prepad;
2618 }
2619
2620 chip->ecc.calculate(mtd, p, oob);
2621 chip->write_buf(mtd, oob, eccbytes);
2622 oob += eccbytes;
2623
2624 if (chip->ecc.postpad) {
2625 chip->write_buf(mtd, oob, chip->ecc.postpad);
2626 oob += chip->ecc.postpad;
2627 }
2628 }
2629
2630 /* Calculate remaining oob bytes */
2631 i = mtd->oobsize - (oob - chip->oob_poi);
2632 if (i)
2633 chip->write_buf(mtd, oob, i);
2634
2635 return 0;
2636 }
2637
2638 /**
2639 * nand_write_page - [REPLACEABLE] write one page
2640 * @mtd: MTD device structure
2641 * @chip: NAND chip descriptor
2642 * @offset: address offset within the page
2643 * @data_len: length of actual data to be written
2644 * @buf: the data to write
2645 * @oob_required: must write chip->oob_poi to OOB
2646 * @page: page number to write
2647 * @cached: cached programming
2648 * @raw: use _raw version of write_page
2649 */
2650 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2651 uint32_t offset, int data_len, const uint8_t *buf,
2652 int oob_required, int page, int cached, int raw)
2653 {
2654 int status, subpage;
2655
2656 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2657 chip->ecc.write_subpage)
2658 subpage = offset || (data_len < mtd->writesize);
2659 else
2660 subpage = 0;
2661
2662 if (nand_standard_page_accessors(&chip->ecc))
2663 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2664
2665 if (unlikely(raw))
2666 status = chip->ecc.write_page_raw(mtd, chip, buf,
2667 oob_required, page);
2668 else if (subpage)
2669 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2670 buf, oob_required, page);
2671 else
2672 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2673 page);
2674
2675 if (status < 0)
2676 return status;
2677
2678 /*
2679 * Cached programming disabled for now. Not sure if it's worth the
2680 * trouble. The speed gain is not very impressive (2.3->2.6Mib/s).
2681 */
2682 cached = 0;
2683
2684 if (!cached || !NAND_HAS_CACHEPROG(chip)) {
2685
2686 if (nand_standard_page_accessors(&chip->ecc))
2687 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2688 status = chip->waitfunc(mtd, chip);
2689 /*
2690 * See if operation failed and additional status checks are
2691 * available.
2692 */
2693 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2694 status = chip->errstat(mtd, chip, FL_WRITING, status,
2695 page);
2696
2697 if (status & NAND_STATUS_FAIL)
2698 return -EIO;
2699 } else {
2700 chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
2701 status = chip->waitfunc(mtd, chip);
2702 }
2703
2704 return 0;
2705 }
2706
2707 /**
2708 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2709 * @mtd: MTD device structure
2710 * @oob: oob data buffer
2711 * @len: oob data write length
2712 * @ops: oob ops structure
2713 */
2714 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2715 struct mtd_oob_ops *ops)
2716 {
2717 struct nand_chip *chip = mtd_to_nand(mtd);
2718 int ret;
2719
2720 /*
2721 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2722 * data from a previous OOB read.
2723 */
2724 memset(chip->oob_poi, 0xff, mtd->oobsize);
2725
2726 switch (ops->mode) {
2727
2728 case MTD_OPS_PLACE_OOB:
2729 case MTD_OPS_RAW:
2730 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2731 return oob + len;
2732
2733 case MTD_OPS_AUTO_OOB:
2734 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2735 ops->ooboffs, len);
2736 BUG_ON(ret);
2737 return oob + len;
2738
2739 default:
2740 BUG();
2741 }
2742 return NULL;
2743 }
2744
2745 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
2746
2747 /**
2748 * nand_do_write_ops - [INTERN] NAND write with ECC
2749 * @mtd: MTD device structure
2750 * @to: offset to write to
2751 * @ops: oob operations description structure
2752 *
2753 * NAND write with ECC.
2754 */
2755 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2756 struct mtd_oob_ops *ops)
2757 {
2758 int chipnr, realpage, page, blockmask, column;
2759 struct nand_chip *chip = mtd_to_nand(mtd);
2760 uint32_t writelen = ops->len;
2761
2762 uint32_t oobwritelen = ops->ooblen;
2763 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2764
2765 uint8_t *oob = ops->oobbuf;
2766 uint8_t *buf = ops->datbuf;
2767 int ret;
2768 int oob_required = oob ? 1 : 0;
2769
2770 ops->retlen = 0;
2771 if (!writelen)
2772 return 0;
2773
2774 /* Reject writes that are not page aligned */
2775 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2776 pr_notice("%s: attempt to write non page aligned data\n",
2777 __func__);
2778 return -EINVAL;
2779 }
2780
2781 column = to & (mtd->writesize - 1);
2782
2783 chipnr = (int)(to >> chip->chip_shift);
2784 chip->select_chip(mtd, chipnr);
2785
2786 /* Check if it is write protected */
2787 if (nand_check_wp(mtd)) {
2788 ret = -EIO;
2789 goto err_out;
2790 }
2791
2792 realpage = (int)(to >> chip->page_shift);
2793 page = realpage & chip->pagemask;
2794 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2795
2796 /* Invalidate the page cache when we write to the cached page */
2797 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2798 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2799 chip->pagebuf = -1;
2800
2801 /* Don't allow multipage oob writes with offset */
2802 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2803 ret = -EINVAL;
2804 goto err_out;
2805 }
2806
2807 while (1) {
2808 int bytes = mtd->writesize;
2809 int cached = writelen > bytes && page != blockmask;
2810 uint8_t *wbuf = buf;
2811 int use_bufpoi;
2812 int part_pagewr = (column || writelen < mtd->writesize);
2813
2814 if (part_pagewr)
2815 use_bufpoi = 1;
2816 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2817 use_bufpoi = !virt_addr_valid(buf);
2818 else
2819 use_bufpoi = 0;
2820
2821 /* Partial page write, or need to use the bounce buffer? */
2822 if (use_bufpoi) {
2823 pr_debug("%s: using write bounce buffer for buf@%p\n",
2824 __func__, buf);
2825 cached = 0;
2826 if (part_pagewr)
2827 bytes = min_t(int, bytes - column, writelen);
2828 chip->pagebuf = -1;
2829 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2830 memcpy(&chip->buffers->databuf[column], buf, bytes);
2831 wbuf = chip->buffers->databuf;
2832 }
2833
2834 if (unlikely(oob)) {
2835 size_t len = min(oobwritelen, oobmaxlen);
2836 oob = nand_fill_oob(mtd, oob, len, ops);
2837 oobwritelen -= len;
2838 } else {
2839 /* We still need to erase leftover OOB data */
2840 memset(chip->oob_poi, 0xff, mtd->oobsize);
2841 }
2842 ret = chip->write_page(mtd, chip, column, bytes, wbuf,
2843 oob_required, page, cached,
2844 (ops->mode == MTD_OPS_RAW));
2845 if (ret)
2846 break;
2847
2848 writelen -= bytes;
2849 if (!writelen)
2850 break;
2851
2852 column = 0;
2853 buf += bytes;
2854 realpage++;
2855
2856 page = realpage & chip->pagemask;
2857 /* Check if we cross a chip boundary */
2858 if (!page) {
2859 chipnr++;
2860 chip->select_chip(mtd, -1);
2861 chip->select_chip(mtd, chipnr);
2862 }
2863 }
2864
2865 ops->retlen = ops->len - writelen;
2866 if (unlikely(oob))
2867 ops->oobretlen = ops->ooblen;
2868
2869 err_out:
2870 chip->select_chip(mtd, -1);
2871 return ret;
2872 }
2873
2874 /**
2875 * panic_nand_write - [MTD Interface] NAND write with ECC
2876 * @mtd: MTD device structure
2877 * @to: offset to write to
2878 * @len: number of bytes to write
2879 * @retlen: pointer to variable to store the number of written bytes
2880 * @buf: the data to write
2881 *
2882 * NAND write with ECC. Used when performing writes in interrupt context, this
2883 * may for example be called by mtdoops when writing an oops while in panic.
2884 */
2885 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2886 size_t *retlen, const uint8_t *buf)
2887 {
2888 struct nand_chip *chip = mtd_to_nand(mtd);
2889 struct mtd_oob_ops ops;
2890 int ret;
2891
2892 /* Wait for the device to get ready */
2893 panic_nand_wait(mtd, chip, 400);
2894
2895 /* Grab the device */
2896 panic_nand_get_device(chip, mtd, FL_WRITING);
2897
2898 memset(&ops, 0, sizeof(ops));
2899 ops.len = len;
2900 ops.datbuf = (uint8_t *)buf;
2901 ops.mode = MTD_OPS_PLACE_OOB;
2902
2903 ret = nand_do_write_ops(mtd, to, &ops);
2904
2905 *retlen = ops.retlen;
2906 return ret;
2907 }
2908
2909 /**
2910 * nand_write - [MTD Interface] NAND write with ECC
2911 * @mtd: MTD device structure
2912 * @to: offset to write to
2913 * @len: number of bytes to write
2914 * @retlen: pointer to variable to store the number of written bytes
2915 * @buf: the data to write
2916 *
2917 * NAND write with ECC.
2918 */
2919 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2920 size_t *retlen, const uint8_t *buf)
2921 {
2922 struct mtd_oob_ops ops;
2923 int ret;
2924
2925 nand_get_device(mtd, FL_WRITING);
2926 memset(&ops, 0, sizeof(ops));
2927 ops.len = len;
2928 ops.datbuf = (uint8_t *)buf;
2929 ops.mode = MTD_OPS_PLACE_OOB;
2930 ret = nand_do_write_ops(mtd, to, &ops);
2931 *retlen = ops.retlen;
2932 nand_release_device(mtd);
2933 return ret;
2934 }
2935
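/*
 * Writes from MTD users reach nand_write() via mtd_write(); offsets and
 * lengths that are not subpage aligned are rejected by nand_do_write_ops()
 * with -EINVAL. A minimal sketch of a caller (example_write_page() is a
 * hypothetical helper, not part of this driver):
 *
 *	static int example_write_page(struct mtd_info *mtd, loff_t ofs,
 *				      const u8 *buf)
 *	{
 *		size_t retlen;
 *
 *		return mtd_write(mtd, ofs, mtd->writesize, &retlen, buf);
 *	}
 */
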
2936 /**
2937 * nand_do_write_oob - [INTERN] NAND write out-of-band
2938 * @mtd: MTD device structure
2939 * @to: offset to write to
2940 * @ops: oob operation description structure
2941 *
2942 * NAND write out-of-band.
2943 */
2944 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
2945 struct mtd_oob_ops *ops)
2946 {
2947 int chipnr, page, status, len;
2948 struct nand_chip *chip = mtd_to_nand(mtd);
2949
2950 pr_debug("%s: to = 0x%08x, len = %i\n",
2951 __func__, (unsigned int)to, (int)ops->ooblen);
2952
2953 len = mtd_oobavail(mtd, ops);
2954
2955 /* Do not allow write past end of page */
2956 if ((ops->ooboffs + ops->ooblen) > len) {
2957 pr_debug("%s: attempt to write past end of page\n",
2958 __func__);
2959 return -EINVAL;
2960 }
2961
2962 if (unlikely(ops->ooboffs >= len)) {
2963 pr_debug("%s: attempt to start write outside oob\n",
2964 __func__);
2965 return -EINVAL;
2966 }
2967
2968 /* Do not allow write past end of device */
2969 if (unlikely(to >= mtd->size ||
2970 ops->ooboffs + ops->ooblen >
2971 ((mtd->size >> chip->page_shift) -
2972 (to >> chip->page_shift)) * len)) {
2973 pr_debug("%s: attempt to write beyond end of device\n",
2974 __func__);
2975 return -EINVAL;
2976 }
2977
2978 chipnr = (int)(to >> chip->chip_shift);
2979
2980 /*
2981 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
2982 * of my DiskOnChip 2000 test units) will clear the whole data page too
2983 * if we don't do this. I have no clue why, but I seem to have 'fixed'
2984 * it in the doc2000 driver in August 1999. dwmw2.
2985 */
2986 nand_reset(chip, chipnr);
2987
2988 chip->select_chip(mtd, chipnr);
2989
2990 /* Shift to get page */
2991 page = (int)(to >> chip->page_shift);
2992
2993 /* Check if it is write protected */
2994 if (nand_check_wp(mtd)) {
2995 chip->select_chip(mtd, -1);
2996 return -EROFS;
2997 }
2998
2999 /* Invalidate the page cache if we write to the cached page */
3000 if (page == chip->pagebuf)
3001 chip->pagebuf = -1;
3002
3003 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3004
3005 if (ops->mode == MTD_OPS_RAW)
3006 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3007 else
3008 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3009
3010 chip->select_chip(mtd, -1);
3011
3012 if (status)
3013 return status;
3014
3015 ops->oobretlen = ops->ooblen;
3016
3017 return 0;
3018 }
3019
3020 /**
3021 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3022 * @mtd: MTD device structure
3023 * @to: offset to write to
3024 * @ops: oob operation description structure
3025 */
3026 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3027 struct mtd_oob_ops *ops)
3028 {
3029 int ret = -ENOTSUPP;
3030
3031 ops->retlen = 0;
3032
3033 /* Do not allow writes past end of device */
3034 if (ops->datbuf && (to + ops->len) > mtd->size) {
3035 pr_debug("%s: attempt to write beyond end of device\n",
3036 __func__);
3037 return -EINVAL;
3038 }
3039
3040 nand_get_device(mtd, FL_WRITING);
3041
3042 switch (ops->mode) {
3043 case MTD_OPS_PLACE_OOB:
3044 case MTD_OPS_AUTO_OOB:
3045 case MTD_OPS_RAW:
3046 break;
3047
3048 default:
3049 goto out;
3050 }
3051
3052 if (!ops->datbuf)
3053 ret = nand_do_write_oob(mtd, to, ops);
3054 else
3055 ret = nand_do_write_ops(mtd, to, ops);
3056
3057 out:
3058 nand_release_device(mtd);
3059 return ret;
3060 }
3061
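/*
 * Leaving datbuf NULL in the ops structure makes mtd_write_oob() take the
 * nand_do_write_oob() path above, touching only the spare area of one
 * page. A minimal sketch (oobbuf and page_offset are hypothetical):
 *
 *	struct mtd_oob_ops ops = { };
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.oobbuf = oobbuf;
 *	ops.ooblen = mtd->oobavail;
 *	ret = mtd_write_oob(mtd, page_offset, &ops);
 */
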
3062 /**
3063 * single_erase - [GENERIC] NAND standard block erase command function
3064 * @mtd: MTD device structure
3065 * @page: the page address of the block which will be erased
3066 *
3067 * Standard erase command for NAND chips. Returns NAND status.
3068 */
3069 static int single_erase(struct mtd_info *mtd, int page)
3070 {
3071 struct nand_chip *chip = mtd_to_nand(mtd);
3072 /* Send commands to erase a block */
3073 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3074 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3075
3076 return chip->waitfunc(mtd, chip);
3077 }
3078
3079 /**
3080 * nand_erase - [MTD Interface] erase block(s)
3081 * @mtd: MTD device structure
3082 * @instr: erase instruction
3083 *
3084 * Erase one or more blocks.
3085 */
3086 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3087 {
3088 return nand_erase_nand(mtd, instr, 0);
3089 }
3090
3091 /**
3092 * nand_erase_nand - [INTERN] erase block(s)
3093 * @mtd: MTD device structure
3094 * @instr: erase instruction
3095 * @allowbbt: allow erasing the bbt area
3096 *
3097 * Erase one or more blocks.
3098 */
3099 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3100 int allowbbt)
3101 {
3102 int page, status, pages_per_block, ret, chipnr;
3103 struct nand_chip *chip = mtd_to_nand(mtd);
3104 loff_t len;
3105
3106 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3107 __func__, (unsigned long long)instr->addr,
3108 (unsigned long long)instr->len);
3109
3110 if (check_offs_len(mtd, instr->addr, instr->len))
3111 return -EINVAL;
3112
3113 /* Grab the lock and see if the device is available */
3114 nand_get_device(mtd, FL_ERASING);
3115
3116 /* Shift to get first page */
3117 page = (int)(instr->addr >> chip->page_shift);
3118 chipnr = (int)(instr->addr >> chip->chip_shift);
3119
3120 /* Calculate pages in each block */
3121 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3122
3123 /* Select the NAND device */
3124 chip->select_chip(mtd, chipnr);
3125
3126 /* Check if it is write protected */
3127 if (nand_check_wp(mtd)) {
3128 pr_debug("%s: device is write protected!\n",
3129 __func__);
3130 instr->state = MTD_ERASE_FAILED;
3131 goto erase_exit;
3132 }
3133
3134 /* Loop through the pages */
3135 len = instr->len;
3136
3137 instr->state = MTD_ERASING;
3138
3139 while (len) {
3140 /* Check if we have a bad block; we do not erase bad blocks! */
3141 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3142 chip->page_shift, allowbbt)) {
3143 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3144 __func__, page);
3145 instr->state = MTD_ERASE_FAILED;
3146 goto erase_exit;
3147 }
3148
3149 /*
3150 * Invalidate the page cache if we erase the block which
3151 * contains the current cached page.
3152 */
3153 if (page <= chip->pagebuf && chip->pagebuf <
3154 (page + pages_per_block))
3155 chip->pagebuf = -1;
3156
3157 status = chip->erase(mtd, page & chip->pagemask);
3158
3159 /*
3160 * See if operation failed and additional status checks are
3161 * available
3162 */
3163 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
3164 status = chip->errstat(mtd, chip, FL_ERASING,
3165 status, page);
3166
3167 /* See if block erase succeeded */
3168 if (status & NAND_STATUS_FAIL) {
3169 pr_debug("%s: failed erase, page 0x%08x\n",
3170 __func__, page);
3171 instr->state = MTD_ERASE_FAILED;
3172 instr->fail_addr =
3173 ((loff_t)page << chip->page_shift);
3174 goto erase_exit;
3175 }
3176
3177 /* Increment page address and decrement length */
3178 len -= (1ULL << chip->phys_erase_shift);
3179 page += pages_per_block;
3180
3181 /* Check if we cross a chip boundary */
3182 if (len && !(page & chip->pagemask)) {
3183 chipnr++;
3184 chip->select_chip(mtd, -1);
3185 chip->select_chip(mtd, chipnr);
3186 }
3187 }
3188 instr->state = MTD_ERASE_DONE;
3189
3190 erase_exit:
3191
3192 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3193
3194 /* Deselect and wake up anyone waiting on the device */
3195 chip->select_chip(mtd, -1);
3196 nand_release_device(mtd);
3197
3198 /* Call the callback function, if any */
3199 if (!ret)
3200 mtd_erase_callback(instr);
3201
3202 /* Return more or less happy */
3203 return ret;
3204 }
3205
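/*
 * Erase requests reach nand_erase_nand() through mtd_erase() with a
 * block-aligned erase_info; for NAND the call completes synchronously, so
 * a callback is not required. A minimal sketch (block_offset is
 * hypothetical):
 *
 *	struct erase_info ei = { };
 *
 *	ei.mtd = mtd;
 *	ei.addr = block_offset;
 *	ei.len = mtd->erasesize;
 *	ret = mtd_erase(mtd, &ei);
 */
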
3206 /**
3207 * nand_sync - [MTD Interface] sync
3208 * @mtd: MTD device structure
3209 *
3210 * Sync is actually just a wait for the chip to become ready.
3211 */
3212 static void nand_sync(struct mtd_info *mtd)
3213 {
3214 pr_debug("%s: called\n", __func__);
3215
3216 /* Grab the lock and see if the device is available */
3217 nand_get_device(mtd, FL_SYNCING);
3218 /* Release it and go back */
3219 nand_release_device(mtd);
3220 }
3221
3222 /**
3223 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3224 * @mtd: MTD device structure
3225 * @offs: offset relative to mtd start
3226 */
3227 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3228 {
3229 struct nand_chip *chip = mtd_to_nand(mtd);
3230 int chipnr = (int)(offs >> chip->chip_shift);
3231 int ret;
3232
3233 /* Select the NAND device */
3234 nand_get_device(mtd, FL_READING);
3235 chip->select_chip(mtd, chipnr);
3236
3237 ret = nand_block_checkbad(mtd, offs, 0);
3238
3239 chip->select_chip(mtd, -1);
3240 nand_release_device(mtd);
3241
3242 return ret;
3243 }
3244
3245 /**
3246 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3247 * @mtd: MTD device structure
3248 * @ofs: offset relative to mtd start
3249 */
3250 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3251 {
3252 int ret;
3253
3254 ret = nand_block_isbad(mtd, ofs);
3255 if (ret) {
3256 /* If it was bad already, return success and do nothing */
3257 if (ret > 0)
3258 return 0;
3259 return ret;
3260 }
3261
3262 return nand_block_markbad_lowlevel(mtd, ofs);
3263 }
3264
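/*
 * A typical user of the two interfaces above is a format/scrub tool that
 * walks the device block by block, skips known bad blocks and only marks
 * new ones after a failed erase or write. A minimal sketch
 * (erase_and_verify() is a hypothetical helper):
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs))
 *			continue;
 *		if (erase_and_verify(mtd, ofs))
 *			mtd_block_markbad(mtd, ofs);
 *	}
 */
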
3265 /**
3266 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3267 * @mtd: MTD device structure
3268 * @ofs: offset relative to mtd start
3269 * @len: length of mtd
3270 */
3271 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3272 {
3273 struct nand_chip *chip = mtd_to_nand(mtd);
3274 u32 part_start_block;
3275 u32 part_end_block;
3276 u32 part_start_die;
3277 u32 part_end_die;
3278
3279 /*
3280 * max_bb_per_die and blocks_per_die are used to determine
3281 * the maximum bad block count.
3282 */
3283 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3284 return -ENOTSUPP;
3285
3286 /* Get the start and end of the partition in erase blocks. */
3287 part_start_block = mtd_div_by_eb(ofs, mtd);
3288 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3289
3290 /* Get the start and end LUNs of the partition. */
3291 part_start_die = part_start_block / chip->blocks_per_die;
3292 part_end_die = part_end_block / chip->blocks_per_die;
3293
3294 /*
3295 * Look up the bad blocks per unit and multiply by the number of units
3296 * that the partition spans.
3297 */
3298 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
3299 }
3300
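/*
 * Worked example (illustrative numbers): for a chip reporting
 * max_bb_per_die = 40 and blocks_per_die = 2048, a partition covering
 * erase blocks 1024..3071 starts on die 0 and ends on die 1, so the
 * function above returns 40 * (1 - 0 + 1) = 80.
 */
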
3301 /**
3302 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3303 * @mtd: MTD device structure
3304 * @chip: nand chip info structure
3305 * @addr: feature address.
3306 * @subfeature_param: the subfeature parameters, a four-byte array.
3307 */
3308 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3309 int addr, uint8_t *subfeature_param)
3310 {
3311 int status;
3312 int i;
3313
3314 if (!chip->onfi_version ||
3315 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3316 & ONFI_OPT_CMD_SET_GET_FEATURES))
3317 return -EINVAL;
3318
3319 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3320 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3321 chip->write_byte(mtd, subfeature_param[i]);
3322
3323 status = chip->waitfunc(mtd, chip);
3324 if (status & NAND_STATUS_FAIL)
3325 return -EIO;
3326 return 0;
3327 }
3328
3329 /**
3330 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3331 * @mtd: MTD device structure
3332 * @chip: nand chip info structure
3333 * @addr: feature address.
3334 * @subfeature_param: the subfeature parameters, a four-byte array.
3335 */
3336 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3337 int addr, uint8_t *subfeature_param)
3338 {
3339 int i;
3340
3341 if (!chip->onfi_version ||
3342 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3343 & ONFI_OPT_CMD_SET_GET_FEATURES))
3344 return -EINVAL;
3345
3346 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3347 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3348 *subfeature_param++ = chip->read_byte(mtd);
3349 return 0;
3350 }
3351
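/*
 * The current value of a feature (here the read-retry option used further
 * below) can be read back the same way it is set, assuming the chip
 * implements ONFI Get Features. A minimal sketch:
 *
 *	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
 *
 *	ret = chip->onfi_get_features(mtd, chip,
 *				      ONFI_FEATURE_ADDR_READ_RETRY, feature);
 *	if (!ret)
 *		pr_info("current read-retry mode: %u\n", feature[0]);
 */
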
3352 /**
3353 * nand_suspend - [MTD Interface] Suspend the NAND flash
3354 * @mtd: MTD device structure
3355 */
3356 static int nand_suspend(struct mtd_info *mtd)
3357 {
3358 return nand_get_device(mtd, FL_PM_SUSPENDED);
3359 }
3360
3361 /**
3362 * nand_resume - [MTD Interface] Resume the NAND flash
3363 * @mtd: MTD device structure
3364 */
3365 static void nand_resume(struct mtd_info *mtd)
3366 {
3367 struct nand_chip *chip = mtd_to_nand(mtd);
3368
3369 if (chip->state == FL_PM_SUSPENDED)
3370 nand_release_device(mtd);
3371 else
3372 pr_err("%s called for a chip which is not in suspended state\n",
3373 __func__);
3374 }
3375
3376 /**
3377 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3378 * prevent further operations
3379 * @mtd: MTD device structure
3380 */
3381 static void nand_shutdown(struct mtd_info *mtd)
3382 {
3383 nand_get_device(mtd, FL_PM_SUSPENDED);
3384 }
3385
3386 /* Set default functions */
3387 static void nand_set_defaults(struct nand_chip *chip, int busw)
3388 {
3389 /* check for proper chip_delay setup, set 20us if not */
3390 if (!chip->chip_delay)
3391 chip->chip_delay = 20;
3392
3393 /* check if a user-supplied command function was given */
3394 if (chip->cmdfunc == NULL)
3395 chip->cmdfunc = nand_command;
3396
3397 /* check if a user-supplied wait function was given */
3398 if (chip->waitfunc == NULL)
3399 chip->waitfunc = nand_wait;
3400
3401 if (!chip->select_chip)
3402 chip->select_chip = nand_select_chip;
3403
3404 /* set for ONFI nand */
3405 if (!chip->onfi_set_features)
3406 chip->onfi_set_features = nand_onfi_set_features;
3407 if (!chip->onfi_get_features)
3408 chip->onfi_get_features = nand_onfi_get_features;
3409
3410 /* If called twice, pointers that depend on busw may need to be reset */
3411 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3412 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3413 if (!chip->read_word)
3414 chip->read_word = nand_read_word;
3415 if (!chip->block_bad)
3416 chip->block_bad = nand_block_bad;
3417 if (!chip->block_markbad)
3418 chip->block_markbad = nand_default_block_markbad;
3419 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3420 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3421 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3422 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3423 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3424 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3425 if (!chip->scan_bbt)
3426 chip->scan_bbt = nand_default_bbt;
3427
3428 if (!chip->controller) {
3429 chip->controller = &chip->hwcontrol;
3430 nand_hw_control_init(chip->controller);
3431 }
3432
3433 }
3434
3435 /* Sanitize ONFI strings so we can safely print them */
3436 static void sanitize_string(uint8_t *s, size_t len)
3437 {
3438 ssize_t i;
3439
3440 /* Null terminate */
3441 s[len - 1] = 0;
3442
3443 /* Remove non printable chars */
3444 for (i = 0; i < len - 1; i++) {
3445 if (s[i] < ' ' || s[i] > 127)
3446 s[i] = '?';
3447 }
3448
3449 /* Remove trailing spaces */
3450 strim(s);
3451 }
3452
3453 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3454 {
3455 int i;
3456 while (len--) {
3457 crc ^= *p++ << 8;
3458 for (i = 0; i < 8; i++)
3459 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3460 }
3461
3462 return crc;
3463 }
3464
3465 /* Parse the Extended Parameter Page. */
3466 static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
3467 struct nand_chip *chip, struct nand_onfi_params *p)
3468 {
3469 struct onfi_ext_param_page *ep;
3470 struct onfi_ext_section *s;
3471 struct onfi_ext_ecc_info *ecc;
3472 uint8_t *cursor;
3473 int ret = -EINVAL;
3474 int len;
3475 int i;
3476
3477 len = le16_to_cpu(p->ext_param_page_length) * 16;
3478 ep = kmalloc(len, GFP_KERNEL);
3479 if (!ep)
3480 return -ENOMEM;
3481
3482 /* Send our own NAND_CMD_PARAM. */
3483 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3484
3485 /* Use the Change Read Column command to skip the ONFI param pages. */
3486 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3487 sizeof(*p) * p->num_of_param_pages , -1);
3488
3489 /* Read out the Extended Parameter Page. */
3490 chip->read_buf(mtd, (uint8_t *)ep, len);
3491 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3492 != le16_to_cpu(ep->crc))) {
3493 pr_debug("CRC check failed.\n");
3494 goto ext_out;
3495 }
3496
3497 /*
3498 * Check the signature.
3499 * We do not strictly follow the ONFI spec here; this may change in the future.
3500 */
3501 if (strncmp(ep->sig, "EPPS", 4)) {
3502 pr_debug("The signature is invalid.\n");
3503 goto ext_out;
3504 }
3505
3506 /* find the ECC section. */
3507 cursor = (uint8_t *)(ep + 1);
3508 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3509 s = ep->sections + i;
3510 if (s->type == ONFI_SECTION_TYPE_2)
3511 break;
3512 cursor += s->length * 16;
3513 }
3514 if (i == ONFI_EXT_SECTION_MAX) {
3515 pr_debug("We cannot find the ECC section.\n");
3516 goto ext_out;
3517 }
3518
3519 /* get the info we want. */
3520 ecc = (struct onfi_ext_ecc_info *)cursor;
3521
3522 if (!ecc->codeword_size) {
3523 pr_debug("Invalid codeword size\n");
3524 goto ext_out;
3525 }
3526
3527 chip->ecc_strength_ds = ecc->ecc_bits;
3528 chip->ecc_step_ds = 1 << ecc->codeword_size;
3529 ret = 0;
3530
3531 ext_out:
3532 kfree(ep);
3533 return ret;
3534 }
3535
3536 static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
3537 {
3538 struct nand_chip *chip = mtd_to_nand(mtd);
3539 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
3540
3541 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
3542 feature);
3543 }
3544
3545 /*
3546 * Configure chip properties from Micron vendor-specific ONFI table
3547 */
3548 static void nand_onfi_detect_micron(struct nand_chip *chip,
3549 struct nand_onfi_params *p)
3550 {
3551 struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
3552
3553 if (le16_to_cpu(p->vendor_revision) < 1)
3554 return;
3555
3556 chip->read_retries = micron->read_retry_options;
3557 chip->setup_read_retry = nand_setup_read_retry_micron;
3558 }
3559
3560 /*
3561 * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
3562 */
3563 static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3564 int *busw)
3565 {
3566 struct nand_onfi_params *p = &chip->onfi_params;
3567 int i, j;
3568 int val;
3569
3570 /* Try ONFI for unknown chip or LP */
3571 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3572 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3573 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3574 return 0;
3575
3576 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3577 for (i = 0; i < 3; i++) {
3578 for (j = 0; j < sizeof(*p); j++)
3579 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3580 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3581 le16_to_cpu(p->crc)) {
3582 break;
3583 }
3584 }
3585
3586 if (i == 3) {
3587 pr_err("Could not find valid ONFI parameter page; aborting\n");
3588 return 0;
3589 }
3590
3591 /* Check version */
3592 val = le16_to_cpu(p->revision);
3593 if (val & (1 << 5))
3594 chip->onfi_version = 23;
3595 else if (val & (1 << 4))
3596 chip->onfi_version = 22;
3597 else if (val & (1 << 3))
3598 chip->onfi_version = 21;
3599 else if (val & (1 << 2))
3600 chip->onfi_version = 20;
3601 else if (val & (1 << 1))
3602 chip->onfi_version = 10;
3603
3604 if (!chip->onfi_version) {
3605 pr_info("unsupported ONFI version: %d\n", val);
3606 return 0;
3607 }
3608
3609 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3610 sanitize_string(p->model, sizeof(p->model));
3611 if (!mtd->name)
3612 mtd->name = p->model;
3613
3614 mtd->writesize = le32_to_cpu(p->byte_per_page);
3615
3616 /*
3617 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3618 * (don't ask me who thought of this...). MTD assumes that these
3619 * dimensions will be power-of-2, so just truncate the remaining area.
3620 */
3621 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3622 mtd->erasesize *= mtd->writesize;
3623
3624 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3625
3626 /* See erasesize comment */
3627 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3628 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3629 chip->bits_per_cell = p->bits_per_cell;
3630
3631 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3632 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3633
3634 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3635 *busw = NAND_BUSWIDTH_16;
3636 else
3637 *busw = 0;
3638
3639 if (p->ecc_bits != 0xff) {
3640 chip->ecc_strength_ds = p->ecc_bits;
3641 chip->ecc_step_ds = 512;
3642 } else if (chip->onfi_version >= 21 &&
3643 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3644
3645 /*
3646 * nand_flash_detect_ext_param_page() uses the Change Read
3647 * Column command, which may not be supported by the current
3648 * chip->cmdfunc, so try to update chip->cmdfunc now. We do not
3649 * replace a user-supplied command function.
3650 */
3651 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3652 chip->cmdfunc = nand_command_lp;
3653
3654 /* The Extended Parameter Page is supported since ONFI 2.1. */
3655 if (nand_flash_detect_ext_param_page(mtd, chip, p))
3656 pr_warn("Failed to detect ONFI extended param page\n");
3657 } else {
3658 pr_warn("Could not retrieve ONFI ECC requirements\n");
3659 }
3660
3661 if (p->jedec_id == NAND_MFR_MICRON)
3662 nand_onfi_detect_micron(chip, p);
3663
3664 return 1;
3665 }
3666
3667 /*
3668 * Check if the NAND chip is JEDEC compliant; returns 1 if it is, 0 otherwise.
3669 */
3670 static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
3671 int *busw)
3672 {
3673 struct nand_jedec_params *p = &chip->jedec_params;
3674 struct jedec_ecc_info *ecc;
3675 int val;
3676 int i, j;
3677
3678 /* Try JEDEC for unknown chip or LP */
3679 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3680 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3681 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3682 chip->read_byte(mtd) != 'C')
3683 return 0;
3684
3685 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3686 for (i = 0; i < 3; i++) {
3687 for (j = 0; j < sizeof(*p); j++)
3688 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3689
3690 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3691 le16_to_cpu(p->crc))
3692 break;
3693 }
3694
3695 if (i == 3) {
3696 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3697 return 0;
3698 }
3699
3700 /* Check version */
3701 val = le16_to_cpu(p->revision);
3702 if (val & (1 << 2))
3703 chip->jedec_version = 10;
3704 else if (val & (1 << 1))
3705 chip->jedec_version = 1; /* vendor specific version */
3706
3707 if (!chip->jedec_version) {
3708 pr_info("unsupported JEDEC version: %d\n", val);
3709 return 0;
3710 }
3711
3712 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3713 sanitize_string(p->model, sizeof(p->model));
3714 if (!mtd->name)
3715 mtd->name = p->model;
3716
3717 mtd->writesize = le32_to_cpu(p->byte_per_page);
3718
3719 /* Please refer to the comment for nand_flash_detect_onfi(). */
3720 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3721 mtd->erasesize *= mtd->writesize;
3722
3723 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3724
3725 /* Please refer to the comment for nand_flash_detect_onfi(). */
3726 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3727 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3728 chip->bits_per_cell = p->bits_per_cell;
3729
3730 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3731 *busw = NAND_BUSWIDTH_16;
3732 else
3733 *busw = 0;
3734
3735 /* ECC info */
3736 ecc = &p->ecc_info[0];
3737
3738 if (ecc->codeword_size >= 9) {
3739 chip->ecc_strength_ds = ecc->ecc_bits;
3740 chip->ecc_step_ds = 1 << ecc->codeword_size;
3741 } else {
3742 pr_warn("Invalid codeword size\n");
3743 }
3744
3745 return 1;
3746 }
3747
3748 /*
3749 * nand_id_has_period - Check if an ID string has a given wraparound period
3750 * @id_data: the ID string
3751 * @arrlen: the length of the @id_data array
3752 * @period: the period of repetition
3753 *
3754 * Check if an ID string is repeated within a given sequence of bytes at a
3755 * specific repetition interval, the period (e.g., {0x20,0x01,0x7F,0x20} has a
3756 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3757 * if the repetition has a period of @period; otherwise, returns zero.
3758 */
3759 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3760 {
3761 int i, j;
3762 for (i = 0; i < period; i++)
3763 for (j = i + period; j < arrlen; j += period)
3764 if (id_data[i] != id_data[j])
3765 return 0;
3766 return 1;
3767 }
3768
3769 /*
3770 * nand_id_len - Get the length of an ID string returned by CMD_READID
3771 * @id_data: the ID string
3772 * @arrlen: the length of the @id_data array
3773 *
3774 * Returns the length of the ID string, according to known wraparound/trailing
3775 * zero patterns. If no pattern exists, returns the length of the array.
3776 */
3777 static int nand_id_len(u8 *id_data, int arrlen)
3778 {
3779 int last_nonzero, period;
3780
3781 /* Find last non-zero byte */
3782 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3783 if (id_data[last_nonzero])
3784 break;
3785
3786 /* All zeros */
3787 if (last_nonzero < 0)
3788 return 0;
3789
3790 /* Calculate wraparound period */
3791 for (period = 1; period < arrlen; period++)
3792 if (nand_id_has_period(id_data, arrlen, period))
3793 break;
3794
3795 /* There's a repeated pattern */
3796 if (period < arrlen)
3797 return period;
3798
3799 /* There are trailing zeros */
3800 if (last_nonzero < arrlen - 1)
3801 return last_nonzero + 1;
3802
3803 /* No pattern detected */
3804 return arrlen;
3805 }
3806
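/*
 * Worked example (hypothetical ID string): for
 *	u8 id[8] = { 0x20, 0x01, 0x7f, 0x20, 0x20, 0x01, 0x7f, 0x20 };
 * periods 1..3 fail (e.g. id[0] != id[6] rules out period 3), while period 4
 * matches every byte, so nand_id_len(id, 8) returns 4.
 */
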
3807 /* Extract the number of bits per cell from the 3rd byte of the extended ID */
3808 static int nand_get_bits_per_cell(u8 cellinfo)
3809 {
3810 int bits;
3811
3812 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3813 bits >>= NAND_CI_CELLTYPE_SHIFT;
3814 return bits + 1;
3815 }
3816
3817 /*
3818 * Many new NAND chips share similar device ID codes, which represent the size of the
3819 * chip. The rest of the parameters must be decoded according to generic or
3820 * manufacturer-specific "extended ID" decoding patterns.
3821 */
3822 static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
3823 u8 id_data[8], int *busw)
3824 {
3825 int extid, id_len;
3826 /* The 3rd id byte holds MLC / multichip data */
3827 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3828 /* The 4th id byte is the important one */
3829 extid = id_data[3];
3830
3831 id_len = nand_id_len(id_data, 8);
3832
3833 /*
3834 * Field definitions are in the following datasheets:
3835 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
3836 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
3837 * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
3838 *
3839 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
3840 * ID to decide what to do.
3841 */
3842 if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
3843 !nand_is_slc(chip) && id_data[5] != 0x00) {
3844 /* Calc pagesize */
3845 mtd->writesize = 2048 << (extid & 0x03);
3846 extid >>= 2;
3847 /* Calc oobsize */
3848 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3849 case 1:
3850 mtd->oobsize = 128;
3851 break;
3852 case 2:
3853 mtd->oobsize = 218;
3854 break;
3855 case 3:
3856 mtd->oobsize = 400;
3857 break;
3858 case 4:
3859 mtd->oobsize = 436;
3860 break;
3861 case 5:
3862 mtd->oobsize = 512;
3863 break;
3864 case 6:
3865 mtd->oobsize = 640;
3866 break;
3867 case 7:
3868 default: /* Other cases are "reserved" (unknown) */
3869 mtd->oobsize = 1024;
3870 break;
3871 }
3872 extid >>= 2;
3873 /* Calc blocksize */
3874 mtd->erasesize = (128 * 1024) <<
3875 (((extid >> 1) & 0x04) | (extid & 0x03));
3876 *busw = 0;
3877 } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
3878 !nand_is_slc(chip)) {
3879 unsigned int tmp;
3880
3881 /* Calc pagesize */
3882 mtd->writesize = 2048 << (extid & 0x03);
3883 extid >>= 2;
3884 /* Calc oobsize */
3885 switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
3886 case 0:
3887 mtd->oobsize = 128;
3888 break;
3889 case 1:
3890 mtd->oobsize = 224;
3891 break;
3892 case 2:
3893 mtd->oobsize = 448;
3894 break;
3895 case 3:
3896 mtd->oobsize = 64;
3897 break;
3898 case 4:
3899 mtd->oobsize = 32;
3900 break;
3901 case 5:
3902 mtd->oobsize = 16;
3903 break;
3904 default:
3905 mtd->oobsize = 640;
3906 break;
3907 }
3908 extid >>= 2;
3909 /* Calc blocksize */
3910 tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
3911 if (tmp < 0x03)
3912 mtd->erasesize = (128 * 1024) << tmp;
3913 else if (tmp == 0x03)
3914 mtd->erasesize = 768 * 1024;
3915 else
3916 mtd->erasesize = (64 * 1024) << tmp;
3917 *busw = 0;
3918 } else {
3919 /* Calc pagesize */
3920 mtd->writesize = 1024 << (extid & 0x03);
3921 extid >>= 2;
3922 /* Calc oobsize */
3923 mtd->oobsize = (8 << (extid & 0x01)) *
3924 (mtd->writesize >> 9);
3925 extid >>= 2;
3926 /* Calc blocksize. Blocksize is a multiple of 64 KiB */
3927 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3928 extid >>= 2;
3929 /* Get buswidth information */
3930 *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
3931
3932 /*
3933 * Toshiba 24nm raw SLC (i.e., not BENAND) chips have 32B OOB per
3934 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
3935 * follows:
3936 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
3937 * 110b -> 24nm
3938 * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
3939 */
3940 if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
3941 nand_is_slc(chip) &&
3942 (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
3943 !(id_data[4] & 0x80) /* !BENAND */) {
3944 mtd->oobsize = 32 * mtd->writesize >> 9;
3945 }
3946
3947 }
3948 }
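/*
 * Worked example for the generic branch above (hypothetical ID, for
 * illustration only): a 4th ID byte of 0x95 decodes to
 * writesize = 1024 << (0x95 & 0x3) = 2048 bytes,
 * oobsize = (8 << 1) * (2048 >> 9) = 64 bytes,
 * erasesize = 64 KiB << 1 = 128 KiB, and an 8-bit bus.
 */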
3949
3950 /*
3951 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3952 * decodes a matching ID table entry and assigns the MTD size parameters for
3953 * the chip.
3954 */
3955 static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
3956 struct nand_flash_dev *type, u8 id_data[8],
3957 int *busw)
3958 {
3959 int maf_id = id_data[0];
3960
3961 mtd->erasesize = type->erasesize;
3962 mtd->writesize = type->pagesize;
3963 mtd->oobsize = mtd->writesize / 32;
3964 *busw = type->options & NAND_BUSWIDTH_16;
3965
3966 /* All legacy ID NAND are small-page, SLC */
3967 chip->bits_per_cell = 1;
3968
3969 /*
3970 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
3971 * some Spansion chips have erasesize that conflicts with size
3972 * listed in nand_ids table.
3973 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
3974 */
3975 if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
3976 && id_data[6] == 0x00 && id_data[7] == 0x00
3977 && mtd->writesize == 512) {
3978 mtd->erasesize = 128 * 1024;
3979 mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
3980 }
3981 }
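/*
 * Worked example (hypothetical 4th ID byte, for illustration only): with
 * id_data[3] = 0x01 the Spansion override above yields
 * erasesize = 128 KiB << ((0x01 & 0x03) << 1) = 128 KiB << 2 = 512 KiB.
 */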
3982
3983 /*
3984 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3985 * heuristic patterns using various detected parameters (e.g., manufacturer,
3986 * page size, cell-type information).
3987 */
3988 static void nand_decode_bbm_options(struct mtd_info *mtd,
3989 struct nand_chip *chip, u8 id_data[8])
3990 {
3991 int maf_id = id_data[0];
3992
3993 /* Set the bad block position */
3994 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3995 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3996 else
3997 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3998
3999 /*
4000 * Bad block marker is stored in the last page of each block on Samsung
4001 * and Hynix MLC devices; stored in first two pages of each block on
4002 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
4003 * AMD/Spansion, and Macronix. All others scan only the first page.
4004 */
4005 if (!nand_is_slc(chip) &&
4006 (maf_id == NAND_MFR_SAMSUNG ||
4007 maf_id == NAND_MFR_HYNIX))
4008 chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
4009 else if ((nand_is_slc(chip) &&
4010 (maf_id == NAND_MFR_SAMSUNG ||
4011 maf_id == NAND_MFR_HYNIX ||
4012 maf_id == NAND_MFR_TOSHIBA ||
4013 maf_id == NAND_MFR_AMD ||
4014 maf_id == NAND_MFR_MACRONIX)) ||
4015 (mtd->writesize == 2048 &&
4016 maf_id == NAND_MFR_MICRON))
4017 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
4018 }
4019
4020 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4021 {
4022 return type->id_len;
4023 }
4024
4025 static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
4026 struct nand_flash_dev *type, u8 *id_data, int *busw)
4027 {
4028 if (!strncmp(type->id, id_data, type->id_len)) {
4029 mtd->writesize = type->pagesize;
4030 mtd->erasesize = type->erasesize;
4031 mtd->oobsize = type->oobsize;
4032
4033 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4034 chip->chipsize = (uint64_t)type->chipsize << 20;
4035 chip->options |= type->options;
4036 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4037 chip->ecc_step_ds = NAND_ECC_STEP(type);
4038 chip->onfi_timing_mode_default =
4039 type->onfi_timing_mode_default;
4040
4041 *busw = type->options & NAND_BUSWIDTH_16;
4042
4043 if (!mtd->name)
4044 mtd->name = type->name;
4045
4046 return true;
4047 }
4048 return false;
4049 }
4050
4051 /*
4052 * Get the flash and manufacturer IDs and look up whether the type is supported.
4053 */
4054 static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
4055 int *maf_id, int *dev_id,
4056 struct nand_flash_dev *type)
4057 {
4058 int busw;
4059 int i, maf_idx;
4060 u8 id_data[8];
4061
4062 /*
4063 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4064 * after power-up.
4065 */
4066 nand_reset(chip, 0);
4067
4068 /* Select the device */
4069 chip->select_chip(mtd, 0);
4070
4071 /* Send the command for reading device ID */
4072 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4073
4074 /* Read manufacturer and device IDs */
4075 *maf_id = chip->read_byte(mtd);
4076 *dev_id = chip->read_byte(mtd);
4077
4078 /*
4079 * Try again to make sure, as on some systems bus-hold or other
4080 * interface concerns can cause random data that looks like a
4081 * possibly credible NAND flash to appear. If the two reads do
4082 * not match, ignore the device completely.
4083 */
4084
4085 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4086
4087 /* Read entire ID string */
4088 for (i = 0; i < 8; i++)
4089 id_data[i] = chip->read_byte(mtd);
4090
4091 if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
4092 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4093 *maf_id, *dev_id, id_data[0], id_data[1]);
4094 return -ENODEV;
4095 }
4096
4097 if (!type)
4098 type = nand_flash_ids;
4099
4100 for (; type->name != NULL; type++) {
4101 if (is_full_id_nand(type)) {
4102 if (find_full_id_nand(mtd, chip, type, id_data, &busw))
4103 goto ident_done;
4104 } else if (*dev_id == type->dev_id) {
4105 break;
4106 }
4107 }
4108
4109 chip->onfi_version = 0;
4110 if (!type->name || !type->pagesize) {
4111 /* Check if the chip is ONFI compliant */
4112 if (nand_flash_detect_onfi(mtd, chip, &busw))
4113 goto ident_done;
4114
4115 /* Check if the chip is JEDEC compliant */
4116 if (nand_flash_detect_jedec(mtd, chip, &busw))
4117 goto ident_done;
4118 }
4119
4120 if (!type->name)
4121 return -ENODEV;
4122
4123 if (!mtd->name)
4124 mtd->name = type->name;
4125
4126 chip->chipsize = (uint64_t)type->chipsize << 20;
4127
4128 if (!type->pagesize) {
4129 /* Decode parameters from extended ID */
4130 nand_decode_ext_id(mtd, chip, id_data, &busw);
4131 } else {
4132 nand_decode_id(mtd, chip, type, id_data, &busw);
4133 }
4134 /* Get chip options */
4135 chip->options |= type->options;
4136
4137 /*
4138 * Clear the Samsung large page options for non-Samsung devices, but do
4139 * not clear them for chips which do not use an extended ID.
4140 */
4141 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
4142 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
4143 ident_done:
4144
4145 /* Try to identify manufacturer */
4146 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
4147 if (nand_manuf_ids[maf_idx].id == *maf_id)
4148 break;
4149 }
4150
4151 if (chip->options & NAND_BUSWIDTH_AUTO) {
4152 WARN_ON(chip->options & NAND_BUSWIDTH_16);
4153 chip->options |= busw;
4154 nand_set_defaults(chip, busw);
4155 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4156 /*
4157 * Check if the bus width is correct. Hardware drivers should
4158 * have set up the chip correctly!
4159 */
4160 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4161 *maf_id, *dev_id);
4162 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
4163 pr_warn("bus width %d instead %d bit\n",
4164 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
4165 busw ? 16 : 8);
4166 return -EINVAL;
4167 }
4168
4169 nand_decode_bbm_options(mtd, chip, id_data);
4170
4171 /* Calculate the address shift from the page size */
4172 chip->page_shift = ffs(mtd->writesize) - 1;
4173 /* Convert chipsize to number of pages per chip -1 */
4174 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4175
4176 chip->bbt_erase_shift = chip->phys_erase_shift =
4177 ffs(mtd->erasesize) - 1;
4178 if (chip->chipsize & 0xffffffff)
4179 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4180 else {
4181 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4182 chip->chip_shift += 32 - 1;
4183 }
4184
4185 chip->badblockbits = 8;
4186 chip->erase = single_erase;
4187
4188 /* Do not replace user supplied command function! */
4189 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4190 chip->cmdfunc = nand_command_lp;
4191
4192 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4193 *maf_id, *dev_id);
4194
4195 if (chip->onfi_version)
4196 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4197 chip->onfi_params.model);
4198 else if (chip->jedec_version)
4199 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4200 chip->jedec_params.model);
4201 else
4202 pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4203 type->name);
4204
4205 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4206 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4207 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4208 return 0;
4209 }
4210
4211 static const char * const nand_ecc_modes[] = {
4212 [NAND_ECC_NONE] = "none",
4213 [NAND_ECC_SOFT] = "soft",
4214 [NAND_ECC_HW] = "hw",
4215 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4216 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4217 };
4218
4219 static int of_get_nand_ecc_mode(struct device_node *np)
4220 {
4221 const char *pm;
4222 int err, i;
4223
4224 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4225 if (err < 0)
4226 return err;
4227
4228 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4229 if (!strcasecmp(pm, nand_ecc_modes[i]))
4230 return i;
4231
4232 /*
4233 * For backward compatibility we support a few obsolete values that
4234 * no longer have mappings in nand_ecc_modes_t (they were merged
4235 * with other enums).
4236 */
4237 if (!strcasecmp(pm, "soft_bch"))
4238 return NAND_ECC_SOFT;
4239
4240 return -ENODEV;
4241 }
4242
4243 static const char * const nand_ecc_algos[] = {
4244 [NAND_ECC_HAMMING] = "hamming",
4245 [NAND_ECC_BCH] = "bch",
4246 };
4247
4248 static int of_get_nand_ecc_algo(struct device_node *np)
4249 {
4250 const char *pm;
4251 int err, i;
4252
4253 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4254 if (!err) {
4255 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4256 if (!strcasecmp(pm, nand_ecc_algos[i]))
4257 return i;
4258 return -ENODEV;
4259 }
4260
4261 /*
4262 * For backward compatibility we also read "nand-ecc-mode", checking
4263 * for some obsolete values that used to specify the ECC algorithm.
4264 */
4265 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4266 if (err < 0)
4267 return err;
4268
4269 if (!strcasecmp(pm, "soft"))
4270 return NAND_ECC_HAMMING;
4271 else if (!strcasecmp(pm, "soft_bch"))
4272 return NAND_ECC_BCH;
4273
4274 return -ENODEV;
4275 }
4276
4277 static int of_get_nand_ecc_step_size(struct device_node *np)
4278 {
4279 int ret;
4280 u32 val;
4281
4282 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4283 return ret ? ret : val;
4284 }
4285
4286 static int of_get_nand_ecc_strength(struct device_node *np)
4287 {
4288 int ret;
4289 u32 val;
4290
4291 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4292 return ret ? ret : val;
4293 }
4294
4295 static int of_get_nand_bus_width(struct device_node *np)
4296 {
4297 u32 val;
4298
4299 if (of_property_read_u32(np, "nand-bus-width", &val))
4300 return 8;
4301
4302 switch (val) {
4303 case 8:
4304 case 16:
4305 return val;
4306 default:
4307 return -EIO;
4308 }
4309 }
4310
4311 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4312 {
4313 return of_property_read_bool(np, "nand-on-flash-bbt");
4314 }
4315
4316 static int nand_dt_init(struct nand_chip *chip)
4317 {
4318 struct device_node *dn = nand_get_flash_node(chip);
4319 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4320
4321 if (!dn)
4322 return 0;
4323
4324 if (of_get_nand_bus_width(dn) == 16)
4325 chip->options |= NAND_BUSWIDTH_16;
4326
4327 if (of_get_nand_on_flash_bbt(dn))
4328 chip->bbt_options |= NAND_BBT_USE_FLASH;
4329
4330 ecc_mode = of_get_nand_ecc_mode(dn);
4331 ecc_algo = of_get_nand_ecc_algo(dn);
4332 ecc_strength = of_get_nand_ecc_strength(dn);
4333 ecc_step = of_get_nand_ecc_step_size(dn);
4334
4335 if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
4336 (!(ecc_step >= 0) && ecc_strength >= 0)) {
4337 pr_err("must set both strength and step size in DT\n");
4338 return -EINVAL;
4339 }
4340
4341 if (ecc_mode >= 0)
4342 chip->ecc.mode = ecc_mode;
4343
4344 if (ecc_algo >= 0)
4345 chip->ecc.algo = ecc_algo;
4346
4347 if (ecc_strength >= 0)
4348 chip->ecc.strength = ecc_strength;
4349
4350 if (ecc_step > 0)
4351 chip->ecc.size = ecc_step;
4352
4353 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4354 chip->ecc.options |= NAND_ECC_MAXIMIZE;
4355
4356 return 0;
4357 }
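/*
 * Illustrative device tree fragment (node name and values are examples only)
 * exercising the generic properties parsed by nand_dt_init() above:
 *
 *	nand@0 {
 *		nand-bus-width = <8>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "soft";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 *
 * nand-ecc-strength and nand-ecc-step-size must be given together, as
 * enforced above; nand-ecc-maximize may be set instead to let the core pick
 * the strongest configuration the OOB layout allows.
 */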
4358
4359 /**
4360 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4361 * @mtd: MTD device structure
4362 * @maxchips: number of chips to scan for
4363 * @table: alternative NAND ID table
4364 *
4365 * This is the first phase of the normal nand_scan() function. It reads the
4366 * flash ID and sets up MTD fields accordingly.
4367 *
4368 */
4369 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4370 struct nand_flash_dev *table)
4371 {
4372 int i, nand_maf_id, nand_dev_id;
4373 struct nand_chip *chip = mtd_to_nand(mtd);
4374 int ret;
4375
4376 ret = nand_dt_init(chip);
4377 if (ret)
4378 return ret;
4379
4380 if (!mtd->name && mtd->dev.parent)
4381 mtd->name = dev_name(mtd->dev.parent);
4382
4383 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4384 /*
4385 * Default functions assigned for chip_select() and
4386 * cmdfunc() both expect cmd_ctrl() to be populated,
4387 * so we need to check that that's the case
4388 */
4389 pr_err("chip.cmd_ctrl() callback is not provided");
4390 return -EINVAL;
4391 }
4392 /* Set the default functions */
4393 nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
4394
4395 /* Read the flash type */
4396 ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table);
4397 if (ret) {
4398 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4399 pr_warn("No NAND device found\n");
4400 chip->select_chip(mtd, -1);
4401 return ret;
4402 }
4403
4404 /* Initialize the ->data_interface field. */
4405 ret = nand_init_data_interface(chip);
4406 if (ret)
4407 return ret;
4408
4409 /*
4410 * Setup the data interface correctly on the chip and controller side.
4411 * This explicit call to nand_setup_data_interface() is only required
4412 * for the first die, because nand_reset() has been called before
4413 * ->data_interface and ->default_onfi_timing_mode were set.
4414 * For the other dies, nand_reset() will automatically switch to the
4415 * best mode for us.
4416 */
4417 ret = nand_setup_data_interface(chip);
4418 if (ret)
4419 return ret;
4420
4421 chip->select_chip(mtd, -1);
4422
4423 /* Check for a chip array */
4424 for (i = 1; i < maxchips; i++) {
4425 /* See comment in nand_get_flash_type for reset */
4426 nand_reset(chip, i);
4427
4428 chip->select_chip(mtd, i);
4429 /* Send the command for reading device ID */
4430 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4431 /* Read manufacturer and device IDs */
4432 if (nand_maf_id != chip->read_byte(mtd) ||
4433 nand_dev_id != chip->read_byte(mtd)) {
4434 chip->select_chip(mtd, -1);
4435 break;
4436 }
4437 chip->select_chip(mtd, -1);
4438 }
4439 if (i > 1)
4440 pr_info("%d chips detected\n", i);
4441
4442 /* Store the number of chips and calc total size for mtd */
4443 chip->numchips = i;
4444 mtd->size = i * chip->chipsize;
4445
4446 return 0;
4447 }
4448 EXPORT_SYMBOL(nand_scan_ident);
4449
4450 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4451 {
4452 struct nand_chip *chip = mtd_to_nand(mtd);
4453 struct nand_ecc_ctrl *ecc = &chip->ecc;
4454
4455 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4456 return -EINVAL;
4457
4458 switch (ecc->algo) {
4459 case NAND_ECC_HAMMING:
4460 ecc->calculate = nand_calculate_ecc;
4461 ecc->correct = nand_correct_data;
4462 ecc->read_page = nand_read_page_swecc;
4463 ecc->read_subpage = nand_read_subpage;
4464 ecc->write_page = nand_write_page_swecc;
4465 ecc->read_page_raw = nand_read_page_raw;
4466 ecc->write_page_raw = nand_write_page_raw;
4467 ecc->read_oob = nand_read_oob_std;
4468 ecc->write_oob = nand_write_oob_std;
4469 if (!ecc->size)
4470 ecc->size = 256;
4471 ecc->bytes = 3;
4472 ecc->strength = 1;
4473 return 0;
4474 case NAND_ECC_BCH:
4475 if (!mtd_nand_has_bch()) {
4476 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4477 return -EINVAL;
4478 }
4479 ecc->calculate = nand_bch_calculate_ecc;
4480 ecc->correct = nand_bch_correct_data;
4481 ecc->read_page = nand_read_page_swecc;
4482 ecc->read_subpage = nand_read_subpage;
4483 ecc->write_page = nand_write_page_swecc;
4484 ecc->read_page_raw = nand_read_page_raw;
4485 ecc->write_page_raw = nand_write_page_raw;
4486 ecc->read_oob = nand_read_oob_std;
4487 ecc->write_oob = nand_write_oob_std;
4488
4489 /*
4490 * Board driver should supply ecc.size and ecc.strength
4491 * values to select how many bits are correctable.
4492 * Otherwise, default to 4 bits for large page devices.
4493 */
4494 if (!ecc->size && (mtd->oobsize >= 64)) {
4495 ecc->size = 512;
4496 ecc->strength = 4;
4497 }
4498
4499 /*
4500 * If no ECC placement scheme was provided, pick up the default
4501 * large page one.
4502 */
4503 if (!mtd->ooblayout) {
4504 /* handle large page devices only */
4505 if (mtd->oobsize < 64) {
4506 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4507 return -EINVAL;
4508 }
4509
4510 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4511
4512 }
4513
4514 /*
4515 * We can only maximize ECC config when the default layout is
4516 * used, otherwise we don't know how many bytes can really be
4517 * used.
4518 */
4519 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4520 ecc->options & NAND_ECC_MAXIMIZE) {
4521 int steps, bytes;
4522
4523 /* Always prefer 1k ECC steps over 512-byte ones */
4524 ecc->size = 1024;
4525 steps = mtd->writesize / ecc->size;
4526
4527 /* Reserve 2 bytes for the BBM */
4528 bytes = (mtd->oobsize - 2) / steps;
4529 ecc->strength = bytes * 8 / fls(8 * ecc->size);
4530 }
4531
4532 /* See nand_bch_init() for details. */
4533 ecc->bytes = 0;
4534 ecc->priv = nand_bch_init(mtd);
4535 if (!ecc->priv) {
4536 WARN(1, "BCH ECC initialization failed!\n");
4537 return -EINVAL;
4538 }
4539 return 0;
4540 default:
4541 WARN(1, "Unsupported ECC algorithm!\n");
4542 return -EINVAL;
4543 }
4544 }
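/*
 * Worked example for the NAND_ECC_MAXIMIZE path above (hypothetical
 * geometry): on a 4096-byte page with 224 bytes of OOB, steps = 4,
 * bytes = (224 - 2) / 4 = 55 and, since fls(8 * 1024) = 14,
 * strength = 55 * 8 / 14 = 31 bits per 1024-byte step.  nand_bch_init() is
 * then expected to derive the actual ECC byte count from that strength (see
 * the comment above the ecc->bytes = 0 assignment).
 */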
4545
4546 /*
4547 * Check if the chip configuration meets the datasheet requirements.
4548 *
4549 * If our configuration corrects A bits per B bytes and the minimum
4550 * required correction level is X bits per Y bytes, then we must ensure
4551 * both of the following are true:
4552 *
4553 * (1) A / B >= X / Y
4554 * (2) A >= X
4555 *
4556 * Requirement (1) ensures we can correct for the required bitflip density.
4557 * Requirement (2) ensures we can correct even when all bitflips are clumped
4558 * in the same sector.
4559 */
4560 static bool nand_ecc_strength_good(struct mtd_info *mtd)
4561 {
4562 struct nand_chip *chip = mtd_to_nand(mtd);
4563 struct nand_ecc_ctrl *ecc = &chip->ecc;
4564 int corr, ds_corr;
4565
4566 if (ecc->size == 0 || chip->ecc_step_ds == 0)
4567 /* Not enough information */
4568 return true;
4569
4570 /*
4571 * Compute the number of correctable bits per page so that the
4572 * correction densities can be compared.
4573 */
4574 corr = (mtd->writesize * ecc->strength) / ecc->size;
4575 ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4576
4577 return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4578 }
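/*
 * Worked example (hypothetical values): if the chip requires 8 bits per
 * 512 bytes (ecc_strength_ds = 8, ecc_step_ds = 512) and the controller
 * corrects 16 bits per 1024 bytes on a 4096-byte page, then
 * corr = 4096 * 16 / 1024 = 64 and ds_corr = 4096 * 8 / 512 = 64, and
 * ecc->strength (16) >= ecc_strength_ds (8), so the configuration is
 * considered good.  An 8-bit-per-1024-byte configuration would fail
 * requirement (1) even though it satisfies requirement (2).
 */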
4579
4580 static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4581 {
4582 struct nand_ecc_ctrl *ecc = &chip->ecc;
4583
4584 if (nand_standard_page_accessors(ecc))
4585 return false;
4586
4587 /*
4588 * The NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, so make sure the NAND
4589 * controller driver implements all the page accessors because
4590 * default helpers are not suitable when the core does not
4591 * send the READ0/PAGEPROG commands.
4592 */
4593 return (!ecc->read_page || !ecc->write_page ||
4594 !ecc->read_page_raw || !ecc->write_page_raw ||
4595 (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4596 (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4597 ecc->hwctl && ecc->calculate));
4598 }
4599
4600 /**
4601 * nand_scan_tail - [NAND Interface] Scan for the NAND device
4602 * @mtd: MTD device structure
4603 *
4604 * This is the second phase of the normal nand_scan() function. It fills out
4605 * all the uninitialized function pointers with the defaults and scans for a
4606 * bad block table if appropriate.
4607 */
4608 int nand_scan_tail(struct mtd_info *mtd)
4609 {
4610 struct nand_chip *chip = mtd_to_nand(mtd);
4611 struct nand_ecc_ctrl *ecc = &chip->ecc;
4612 struct nand_buffers *nbuf;
4613 int ret;
4614
4615 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
4616 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
4617 !(chip->bbt_options & NAND_BBT_USE_FLASH)))
4618 return -EINVAL;
4619
4620 if (invalid_ecc_page_accessors(chip)) {
4621 pr_err("Invalid ECC page accessors setup\n");
4622 return -EINVAL;
4623 }
4624
4625 if (!(chip->options & NAND_OWN_BUFFERS)) {
4626 nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
4627 + mtd->oobsize * 3, GFP_KERNEL);
4628 if (!nbuf)
4629 return -ENOMEM;
4630 nbuf->ecccalc = (uint8_t *)(nbuf + 1);
4631 nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
4632 nbuf->databuf = nbuf->ecccode + mtd->oobsize;
4633
4634 chip->buffers = nbuf;
4635 } else {
4636 if (!chip->buffers)
4637 return -ENOMEM;
4638 }
4639
4640 /* Set the internal oob buffer location, just after the page data */
4641 chip->oob_poi = chip->buffers->databuf + mtd->writesize;
4642
4643 /*
4644 * If no default placement scheme is given, select an appropriate one.
4645 */
4646 if (!mtd->ooblayout &&
4647 !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
4648 switch (mtd->oobsize) {
4649 case 8:
4650 case 16:
4651 mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
4652 break;
4653 case 64:
4654 case 128:
4655 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4656 break;
4657 default:
4658 WARN(1, "No oob scheme defined for oobsize %d\n",
4659 mtd->oobsize);
4660 ret = -EINVAL;
4661 goto err_free;
4662 }
4663 }
4664
4665 if (!chip->write_page)
4666 chip->write_page = nand_write_page;
4667
4668 /*
4669 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected but
4670 * the page size is only 256 bytes, fall back to software ECC.
4671 */
4672
4673 switch (ecc->mode) {
4674 case NAND_ECC_HW_OOB_FIRST:
4675 /* Similar to NAND_ECC_HW, but with a separate read_page handler */
4676 if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
4677 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4678 ret = -EINVAL;
4679 goto err_free;
4680 }
4681 if (!ecc->read_page)
4682 ecc->read_page = nand_read_page_hwecc_oob_first;
4683 /* fall through */
4684 case NAND_ECC_HW:
4685 /* Use standard hwecc read page function? */
4686 if (!ecc->read_page)
4687 ecc->read_page = nand_read_page_hwecc;
4688 if (!ecc->write_page)
4689 ecc->write_page = nand_write_page_hwecc;
4690 if (!ecc->read_page_raw)
4691 ecc->read_page_raw = nand_read_page_raw;
4692 if (!ecc->write_page_raw)
4693 ecc->write_page_raw = nand_write_page_raw;
4694 if (!ecc->read_oob)
4695 ecc->read_oob = nand_read_oob_std;
4696 if (!ecc->write_oob)
4697 ecc->write_oob = nand_write_oob_std;
4698 if (!ecc->read_subpage)
4699 ecc->read_subpage = nand_read_subpage;
4700 if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
4701 ecc->write_subpage = nand_write_subpage_hwecc;
4702 /* fall through */
4703 case NAND_ECC_HW_SYNDROME:
4704 if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
4705 (!ecc->read_page ||
4706 ecc->read_page == nand_read_page_hwecc ||
4707 !ecc->write_page ||
4708 ecc->write_page == nand_write_page_hwecc)) {
4709 WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
4710 ret = -EINVAL;
4711 goto err_free;
4712 }
4713 /* Use standard syndrome read/write page function? */
4714 if (!ecc->read_page)
4715 ecc->read_page = nand_read_page_syndrome;
4716 if (!ecc->write_page)
4717 ecc->write_page = nand_write_page_syndrome;
4718 if (!ecc->read_page_raw)
4719 ecc->read_page_raw = nand_read_page_raw_syndrome;
4720 if (!ecc->write_page_raw)
4721 ecc->write_page_raw = nand_write_page_raw_syndrome;
4722 if (!ecc->read_oob)
4723 ecc->read_oob = nand_read_oob_syndrome;
4724 if (!ecc->write_oob)
4725 ecc->write_oob = nand_write_oob_syndrome;
4726
4727 if (mtd->writesize >= ecc->size) {
4728 if (!ecc->strength) {
4729 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
4730 ret = -EINVAL;
4731 goto err_free;
4732 }
4733 break;
4734 }
4735 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
4736 ecc->size, mtd->writesize);
4737 ecc->mode = NAND_ECC_SOFT;
4738 ecc->algo = NAND_ECC_HAMMING;
4739 /* fall through */
4740 case NAND_ECC_SOFT:
4741 ret = nand_set_ecc_soft_ops(mtd);
4742 if (ret) {
4743 ret = -EINVAL;
4744 goto err_free;
4745 }
4746 break;
4747
4748 case NAND_ECC_NONE:
4749 pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
4750 ecc->read_page = nand_read_page_raw;
4751 ecc->write_page = nand_write_page_raw;
4752 ecc->read_oob = nand_read_oob_std;
4753 ecc->read_page_raw = nand_read_page_raw;
4754 ecc->write_page_raw = nand_write_page_raw;
4755 ecc->write_oob = nand_write_oob_std;
4756 ecc->size = mtd->writesize;
4757 ecc->bytes = 0;
4758 ecc->strength = 0;
4759 break;
4760
4761 default:
4762 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
4763 ret = -EINVAL;
4764 goto err_free;
4765 }
4766
4767 /* For many systems, the standard OOB write also works for raw */
4768 if (!ecc->read_oob_raw)
4769 ecc->read_oob_raw = ecc->read_oob;
4770 if (!ecc->write_oob_raw)
4771 ecc->write_oob_raw = ecc->write_oob;
4772
4773 /* propagate ecc info to mtd_info */
4774 mtd->ecc_strength = ecc->strength;
4775 mtd->ecc_step_size = ecc->size;
4776
4777 /*
4778 * Set the number of read / write steps for one page depending on ECC
4779 * mode.
4780 */
4781 ecc->steps = mtd->writesize / ecc->size;
4782 if (ecc->steps * ecc->size != mtd->writesize) {
4783 WARN(1, "Invalid ECC parameters\n");
4784 ret = -EINVAL;
4785 goto err_free;
4786 }
4787 ecc->total = ecc->steps * ecc->bytes;
4788
4789 /*
4790 * The number of bytes available for a client to place data into
4791 * the out of band area.
4792 */
4793 ret = mtd_ooblayout_count_freebytes(mtd);
4794 if (ret < 0)
4795 ret = 0;
4796
4797 mtd->oobavail = ret;
4798
4799 /* ECC sanity check: warn if it's too weak */
4800 if (!nand_ecc_strength_good(mtd))
4801 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4802 mtd->name);
4803
4804 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
4805 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
4806 switch (ecc->steps) {
4807 case 2:
4808 mtd->subpage_sft = 1;
4809 break;
4810 case 4:
4811 case 8:
4812 case 16:
4813 mtd->subpage_sft = 2;
4814 break;
4815 }
4816 }
4817 chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
4818
4819 /* Initialize state */
4820 chip->state = FL_READY;
4821
4822 /* Invalidate the pagebuffer reference */
4823 chip->pagebuf = -1;
4824
4825 /* Large page NAND with SOFT_ECC should support subpage reads */
4826 switch (ecc->mode) {
4827 case NAND_ECC_SOFT:
4828 if (chip->page_shift > 9)
4829 chip->options |= NAND_SUBPAGE_READ;
4830 break;
4831
4832 default:
4833 break;
4834 }
4835
4836 /* Fill in remaining MTD driver data */
4837 mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
4838 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
4839 MTD_CAP_NANDFLASH;
4840 mtd->_erase = nand_erase;
4841 mtd->_point = NULL;
4842 mtd->_unpoint = NULL;
4843 mtd->_read = nand_read;
4844 mtd->_write = nand_write;
4845 mtd->_panic_write = panic_nand_write;
4846 mtd->_read_oob = nand_read_oob;
4847 mtd->_write_oob = nand_write_oob;
4848 mtd->_sync = nand_sync;
4849 mtd->_lock = NULL;
4850 mtd->_unlock = NULL;
4851 mtd->_suspend = nand_suspend;
4852 mtd->_resume = nand_resume;
4853 mtd->_reboot = nand_shutdown;
4854 mtd->_block_isreserved = nand_block_isreserved;
4855 mtd->_block_isbad = nand_block_isbad;
4856 mtd->_block_markbad = nand_block_markbad;
4857 mtd->_max_bad_blocks = nand_max_bad_blocks;
4858 mtd->writebufsize = mtd->writesize;
4859
4860 /*
4861 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
4862 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
4863 * properly set.
4864 */
4865 if (!mtd->bitflip_threshold)
4866 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
4867
4868 /* Check, if we should skip the bad block table scan */
4869 if (chip->options & NAND_SKIP_BBTSCAN)
4870 return 0;
4871
4872 /* Build bad block table */
4873 return chip->scan_bbt(mtd);
4874 err_free:
4875 if (!(chip->options & NAND_OWN_BUFFERS))
4876 kfree(chip->buffers);
4877 return ret;
4878 }
4879 EXPORT_SYMBOL(nand_scan_tail);
4880
4881 /*
4882 * is_module_text_address() isn't exported, and it's mostly a pointless
4883 * test if this is a module _anyway_ -- they'd have to try _really_ hard
4884 * to call us from in-kernel code if the core NAND support is modular.
4885 */
4886 #ifdef MODULE
4887 #define caller_is_module() (1)
4888 #else
4889 #define caller_is_module() \
4890 is_module_text_address((unsigned long)__builtin_return_address(0))
4891 #endif
4892
4893 /**
4894 * nand_scan - [NAND Interface] Scan for the NAND device
4895 * @mtd: MTD device structure
4896 * @maxchips: number of chips to scan for
4897 *
4898 * This fills out all the uninitialized function pointers with the defaults.
4899 * The flash ID is read and the mtd/chip structures are filled with the
4900 * appropriate values.
4901 */
4902 int nand_scan(struct mtd_info *mtd, int maxchips)
4903 {
4904 int ret;
4905
4906 ret = nand_scan_ident(mtd, maxchips, NULL);
4907 if (!ret)
4908 ret = nand_scan_tail(mtd);
4909 return ret;
4910 }
4911 EXPORT_SYMBOL(nand_scan);
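/*
 * Minimal usage sketch (not a real driver): shows how a hypothetical
 * memory-mapped controller driver could scan and register a chip.  The
 * "io" and "cmd_ctrl" parameters, the soft Hamming ECC choice and the
 * simplified error handling are illustrative assumptions only; drivers that
 * need to tune ECC between the two scan phases call nand_scan_ident() and
 * nand_scan_tail() separately, as described above.
 */
static int __maybe_unused nand_base_usage_sketch(void __iomem *io,
		void (*cmd_ctrl)(struct mtd_info *, int, unsigned int))
{
	struct nand_chip *chip;
	struct mtd_info *mtd;
	int ret;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	mtd = nand_to_mtd(chip);
	chip->IO_ADDR_R = io;
	chip->IO_ADDR_W = io;
	chip->cmd_ctrl = cmd_ctrl;
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	/* Identify the chip, fill in the defaults and register the device. */
	ret = nand_scan(mtd, 1);
	if (!ret)
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		kfree(chip);

	return ret;
}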
4912
4913 /**
4914 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
4915 * @chip: NAND chip object
4916 */
4917 void nand_cleanup(struct nand_chip *chip)
4918 {
4919 if (chip->ecc.mode == NAND_ECC_SOFT &&
4920 chip->ecc.algo == NAND_ECC_BCH)
4921 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
4922
4923 nand_release_data_interface(chip);
4924
4925 /* Free bad block table memory */
4926 kfree(chip->bbt);
4927 if (!(chip->options & NAND_OWN_BUFFERS))
4928 kfree(chip->buffers);
4929
4930 /* Free bad block descriptor memory */
4931 if (chip->badblock_pattern && chip->badblock_pattern->options
4932 & NAND_BBT_DYNAMICSTRUCT)
4933 kfree(chip->badblock_pattern);
4934 }
4935 EXPORT_SYMBOL_GPL(nand_cleanup);
4936
4937 /**
4938 * nand_release - [NAND Interface] Unregister the MTD device and free resources
4939 * held by the NAND device
4940 * @mtd: MTD device structure
4941 */
4942 void nand_release(struct mtd_info *mtd)
4943 {
4944 mtd_device_unregister(mtd);
4945 nand_cleanup(mtd_to_nand(mtd));
4946 }
4947 EXPORT_SYMBOL_GPL(nand_release);
4948
4949 MODULE_LICENSE("GPL");
4950 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
4951 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
4952 MODULE_DESCRIPTION("Generic NAND flash driver code");