/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};

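/*
 * Completion callbacks: the DMA callback and the GPIO interrupt handler
 * just wake up whoever is waiting in the bufferram transfer or wait paths.
 */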
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

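/*
 * Wait for the current OneNAND command to finish.  Reset and erase states
 * are polled with udelay(), reads poll the interrupt register with the
 * interface interrupt disabled, and everything else sleeps until the GPIO
 * interrupt (or a 20 ms timeout, retried up to three times) signals
 * completion.
 */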
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) != intr_flags) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

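/*
 * Byte offset of the currently selected BufferRAM bank: bank 1 starts one
 * page (DataRAM) or one OOB area (SpareRAM) after bank 0.
 */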
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

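/*
 * OMAP3 BufferRAM read: use DMA for word-aligned transfers of at least
 * 384 bytes outside interrupt context, and fall back to memcpy otherwise
 * (or when the DMA mapping fails or the transfer times out).
 */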
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

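/*
 * OMAP3 BufferRAM write: same strategy as the read path, with the DMA
 * transfer going from memory to the chip's BufferRAM.
 */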
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
			unsigned char *buffer, int offset,
			size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
			const unsigned char *buffer,
			int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

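/*
 * OMAP2 BufferRAM read.  The DMA path below is currently disabled by the
 * "if (1 || ...)" guard, so in practice this always uses memcpy.
 */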
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

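/*
 * OMAP2 BufferRAM write.  As with the read path, DMA is currently disabled
 * and the copy is done with memcpy.
 */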
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
			unsigned char *buffer, int offset,
			size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
			const unsigned char *buffer,
			int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

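/*
 * Re-run the board supplied GPMC timing setup for one device; used by
 * omap2_onenand_rephase() to retune every device bound to this driver.
 */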
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly.  Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

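/*
 * Probe: request the GPMC chip select, map the OneNAND window, set up the
 * optional GPIO interrupt and DMA channel, then scan the chip and register
 * the MTD device (with partitions when provided).
 */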
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");