/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
#define AT49BV640D		0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:          supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and it looks like the device IDs are
	 * as well.  This table picks up all the cases where we
	 * know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
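
/*
 * How these tables are consumed: cfi_fixup() (drivers/mtd/chips/cfi_util.c)
 * walks a table and calls every handler whose manufacturer and device ID
 * match the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards
 * and the entry's last field passed to the handler as 'param'.  So a
 * hypothetical quirk for an Intel chip with device ID 0x1234 would be
 * hooked up with an entry like
 *
 *	{ MANUFACTURER_INTEL, 0x1234, fixup_my_quirk, NULL },
 *
 * placed before the terminating { 0, 0, NULL, NULL } entry.
 */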

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
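
/*
 * The parsing above is a "guess and grow" scheme: the variable-length
 * fields that follow the fixed part of the extended query table can only
 * be sized by reading them.  So we start by reading sizeof(*extp) bytes
 * and, whenever the table turns out to extend past what was read, free
 * the buffer, bump extp_size to the newly computed requirement and
 * re-read from the 'again' label, giving up at an arbitrary 4KiB sanity
 * limit.
 */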

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
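
/*
 * Command sets 0x0003 (Intel Standard) and 0x0200 are close enough to the
 * Extended set that one implementation serves all three: cfi_cmdset_0003()
 * and cfi_cmdset_0200() are linker-level aliases of cfi_cmdset_0001(), and
 * the few places where behaviour differs (e.g. the 0x41/0xe9 variants of
 * the write commands for P_ID 0x0200) test cfi->cfiq->P_ID at run time.
 */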

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
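
/*
 * Worked example (illustrative numbers): a single 64MiB chip
 * (cfi->chipshift == 26) advertising 4 identical hardware partitions
 * gets partshift = 26 - __ffs(4) = 24, so its one flchip is replaced by
 * four virtual chips of 16MiB each, all pointing at the same
 * flchip_shared structure so that write/erase contention between the
 * partitions can be arbitrated in get_chip()/put_chip() below.
 */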

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}
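
/*
 * chip_ready() returns 0 when the chip is usable for 'mode', -EIO on a
 * hard failure, and -EAGAIN when the caller must re-examine chip->state
 * (either after the 1us busy-wait above or after sleeping on chip->wq
 * until put_chip() issues a wake_up()).  Callers are expected to loop on
 * -EAGAIN, as get_chip() does below.
 */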

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
	    || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
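
/*
 * Note the lock juggling above: shared->lock is never held across a call
 * into chip_ready(), and the contender's mutex is only taken via
 * spin_trylock() while our own chip->mutex is held, backing off to the
 * 'retry' label on failure.  That ordering is what keeps two partitions
 * fighting over the same physical chip from deadlocking on each other's
 * mutexes.
 */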

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
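
/*
 * A note on the timeout accounting above: 'usec' starts at 8x the typical
 * operation time (or 500ms when the chip reports none), and each time the
 * operation is suspended to service an interrupt the time already spent
 * ('done') is subtracted from the budget and the clock restarted on
 * resume, so interrupt servicing never eats into the time allowed for the
 * flash operation itself.
 */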

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);

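/*
 * In both the XIP and non-XIP builds, INVAL_CACHE_AND_WAIT() is the one
 * primitive used to wait for a program or erase operation: it takes the
 * command address to poll for status, the range whose cached contents
 * must be invalidated (zero length for none, as in the WAIT_TIMEOUT()
 * wrapper above), and the typical operation time in microseconds from
 * which the 8x timeout budget is derived.
 */
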
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
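
/*
 * The CMD(0x1a) mask checked above picks out the status register error
 * bits: SR.1 (block locked), SR.3 (VPP low) and SR.4 (program failure).
 * The same decoding is repeated after buffer writes below, mapping SR.1
 * to -EROFS, SR.3 to -EIO and anything else to -EINVAL.
 */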

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
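
/*
 * Worked example (illustrative numbers): on a map with a 4-byte bankwidth,
 * a 9-byte write starting at offset 2 is split into a 2-byte head merged
 * into 0xff padding at bus offset 0, one full aligned word, and a 3-byte
 * tail, again padded with 0xff.  The padding is harmless because
 * programming a NOR flash cell can only clear bits, so 0xff leaves the
 * neighbouring bytes unchanged.
 */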

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
1721
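/*
 * Write out one or more kvecs, splitting the transfer so that each
 * do_write_buffer() call stays within a single write-buffer block and
 * a single chip, and rescheduling between blocks so other users of the
 * chip are not starved.
 */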
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

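/* Plain-buffer write front end: wrap the flat buffer in a single kvec
   and reuse the vectored path above. */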
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

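/*
 * Erase a single block: clear the status register (0x50), issue the
 * Block Erase command pair (0x20, 0xD0), then wait for completion and
 * decode any error bits.  Erase failures flagged in SR.5 alone are
 * retried up to three times.
 */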
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

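	/* Decode the status-register error bits checked below (mask 0x3a):
	   SR.1 = block locked, SR.3 = VPP low, SR.4+SR.5 together = bad
	   command sequence, SR.5 alone = erase failure. */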
	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

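/* Walk all blocks in [instr->addr, instr->addr + instr->len) with
   do_erase_oneblock() and report completion to the MTD layer. */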
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

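/* Quiesce every chip: wait for any pending operation, park the chips
   in FL_SYNCING so nothing new can start, then wake all waiters again. */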
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

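/* Read the lock status of one block: enter Read Identifier mode (0x90)
   and fetch the block lock configuration word at offset 2 of the block. */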
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

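/*
 * Lock or unlock one block, selected by the thunk argument: 0x60
 * followed by 0x01 sets the lock bit, 0x60 followed by 0xD0 clears it.
 * Chips advertising Instant Individual Block Locking (FeatureSupport
 * bit 5) complete immediately, so no polling delay is needed there.
 */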
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}

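/*
 * Support for the factory and user OTP (one-time programmable)
 * protection registers.  These live outside the normal flash array and
 * are reached through Read Identifier mode; each register group can be
 * read, programmed once, and permanently locked by clearing its bit in
 * the protection lock word.
 */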
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

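/* Program OTP data one bus word at a time: load the user bytes into an
   all-ones datum at the right byte offset so neighbouring OTP bits are
   left unprogrammed, then hand each word to do_write_oneword(). */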
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

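/* Permanently lock an OTP group by clearing its bit in the protection
   lock word.  The request must cover the whole group; partial locking
   is rejected with -EXDEV. */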
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

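/*
 * Common walker for all OTP operations.  It iterates over the real
 * chips (skipping interleave-induced virtual ones), adjusts the
 * register layout for bus geometry, and applies the action callback to
 * every group that intersects the requested range.  With a NULL action
 * it instead fills buf with otp_info records describing each group.
 */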
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

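/* Record the current lock state of every block into the per-region
   lockmap bitmaps, so it can be restored after a power cycle (used by
   suspend on parts that lock all blocks at power-up). */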
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

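/*
 * Suspend handler: save lock state if the chip auto-locks at power-up,
 * then put every idle chip into a known (read array) state.  Chips
 * with an operation still pending refuse the suspend with -EAGAIN, in
 * which case any chips already suspended are rolled back.
 */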
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

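/* Re-issue an unlock for every block the lockmap recorded as unlocked;
   used on resume for chips whose blocks come up locked after power-up. */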
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

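/* Force every chip back into read array mode so that a bootloader
   stored in flash remains readable across a soft reboot; called from
   the reboot notifier and from destroy. */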
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);	/* kfree(NULL) is a no-op */
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");