/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* fix: this used to read cfi_pri_amdstd, which is the AMD command
	   set structure and has no SuspendCmdSupport field; the Intel
	   extended query structure is the right type here */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
	mtd->flags |= MTD_STUPID_LOCK;
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are
	 * common as well.  This table picks up the cases where
	 * we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

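	/*
	 * Annotation: the extended query table carries a variable-length
	 * tail (OTP fields, burst read info, partition records).  The
	 * code below first reads the fixed-size part, walks the tail to
	 * find out how big the table really is, then frees the buffer
	 * and re-reads the whole thing at the larger size, capped at
	 * 4096 bytes.
	 */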
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
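		/*
		 * Worked example (annotation, hypothetical numbers): for a
		 * 32 MiB chip, chipshift = 25; with numparts = 4 hardware
		 * partitions, __ffs(4) = 2, so partshift = 23 and each
		 * virtual chip below covers 1 << 23 bytes = 8 MiB.
		 */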

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through: status says the chip is ready */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip any more */
		return -EIO;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
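		/*
		 * Concrete case (annotation): if partition A owns a running
		 * erase and partition B wants to write, B calls chip_ready()
		 * on A's flchip below; A's erase gets suspended (see the
		 * FL_ERASING case in chip_ready) and B takes over
		 * shared->writing, handing ownership back through put_chip()
		 * so the erase can resume.
		 */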
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */
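/*
 * (Annotation) The reason for the above: with XIP the kernel can be
 * executing its own instructions out of this very flash.  A program or
 * erase takes the chip out of array mode, so an interrupt handler fetched
 * from flash in that window would read status bits instead of code.
 * Interrupts therefore stay masked for as long as the array is unreadable.
 */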

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
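	/* (Annotation) the 0x1a mask tests SR.4 (program error), SR.3
	   (VPP low) and SR.1 (block locked) in the Intel status register;
	   they are decoded below to -EINVAL, -EIO and -EROFS. */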
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;	/* fix: adr/len are advanced by the
					   copy loop below, so remember the
					   original range for the cache
					   invalidation at the end */
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );
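	/* (Annotation) CMD(words) holds the word count minus one, which is
	   how the buffered program command (0xe8/0xe9) encodes the
	   transfer length. */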

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	/* fix: use the saved range; adr/len were consumed above */
	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

e102d54a
NP
1652static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1653 unsigned long count, loff_t to, size_t *retlen)
1da177e4
LT
1654{
1655 struct map_info *map = mtd->priv;
1656 struct cfi_private *cfi = map->fldrv_priv;
1657 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1658 int ret = 0;
1659 int chipnum;
e102d54a
NP
1660 unsigned long ofs, vec_seek, i;
1661 size_t len = 0;
1662
1663 for (i = 0; i < count; i++)
1664 len += vecs[i].iov_len;
1da177e4
LT
1665
1666 *retlen = 0;
1667 if (!len)
1668 return 0;
1669
1670 chipnum = to >> cfi->chipshift;
e102d54a
NP
1671 ofs = to - (chipnum << cfi->chipshift);
1672 vec_seek = 0;
1da177e4 1673
e102d54a 1674 do {
1da177e4
LT
1675 /* We must not cross write block boundaries */
1676 int size = wbufsize - (ofs & (wbufsize-1));
1677
1678 if (size > len)
1679 size = len;
1f948b43 1680 ret = do_write_buffer(map, &cfi->chips[chipnum],
e102d54a 1681 ofs, &vecs, &vec_seek, size);
1682 if (ret)
1683 return ret;
1684
1685 ofs += size;
1686 (*retlen) += size;
1687 len -= size;
1688
1689 if (ofs >> cfi->chipshift) {
1f948b43 1690 chipnum ++;
1691 ofs = 0;
1692 if (chipnum == cfi->numchips)
1693 return 0;
1694 }
1695
1696 /* Be nice and reschedule with the chip in a usable state for other
1697 processes. */
1698 cond_resched();
1699
1700 } while (len);
1701
1702 return 0;
1703}
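/*
 * [Editorial sketch — not part of the driver] cfi_intelext_writev()
 * above carves the outgoing stream into chunks that never cross a
 * write-buffer boundary.  The same carving in stand-alone C, assuming a
 * hypothetical 32-byte write buffer (the real size is
 * cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize):
 */
#include <stdio.h>

#define WBUFSIZE 32UL

int main(void)
{
	unsigned long ofs = 20, len = 100;

	while (len) {
		/* distance to the next write-buffer boundary */
		unsigned long size = WBUFSIZE - (ofs & (WBUFSIZE - 1));

		if (size > len)
			size = len;
		printf("chunk: ofs=%lu len=%lu\n", ofs, size);
		ofs += size;
		len -= size;
	}
	return 0;
}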
1704
1705static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1706 size_t len, size_t *retlen, const u_char *buf)
1707{
1708 struct kvec vec;
1709
1710 vec.iov_base = (void *) buf;
1711 vec.iov_len = len;
1712
1713 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1714}
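/*
 * [Editorial sketch — not part of the driver] The single-buffer entry
 * point above simply wraps buf in a one-element vector and reuses the
 * scatter-gather path.  A user-space analog using POSIX struct iovec
 * (the user-space counterpart of the kernel's struct kvec):
 */
#include <stdio.h>
#include <sys/uio.h>

static size_t total_len(const struct iovec *vecs, unsigned long count)
{
	size_t len = 0;
	unsigned long i;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;
	return len;
}

int main(void)
{
	char buf[] = "payload";
	struct iovec vec = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };

	printf("would program %zu bytes\n", total_len(&vec, 1));
	return 0;
}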
1715
1716static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1717 unsigned long adr, int len, void *thunk)
1718{
1719 struct cfi_private *cfi = map->fldrv_priv;
c172471b 1720 map_word status;
1da177e4 1721 int retries = 3;
c172471b 1722 int ret;
1723
1724 adr += chip->start;
1725
1726 retry:
1727 spin_lock(chip->mutex);
1728 ret = get_chip(map, chip, adr, FL_ERASING);
1729 if (ret) {
1730 spin_unlock(chip->mutex);
1731 return ret;
1732 }
1733
1734 XIP_INVAL_CACHED_RANGE(map, adr, len);
1735 ENABLE_VPP(map);
1736 xip_disable(map, chip, adr);
1737
1738 /* Clear the status register first */
1739 map_write(map, CMD(0x50), adr);
1740
1741 /* Now erase */
1742 map_write(map, CMD(0x20), adr);
1743 map_write(map, CMD(0xD0), adr);
1744 chip->state = FL_ERASING;
1745 chip->erase_suspended = 0;
1746
1747 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1748 adr, len,
46a1652c 1749 chip->erase_time);
1750 if (ret) {
1751 map_write(map, CMD(0x70), adr);
1752 chip->state = FL_STATUS;
1753 xip_enable(map, chip, adr);
1754 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1755 goto out;
1756 }
1757
1758 /* We've broken this before. It doesn't hurt to be safe */
1759 map_write(map, CMD(0x70), adr);
1760 chip->state = FL_STATUS;
1761 status = map_read(map, adr);
1762
4843653c 1763 /* check for errors */
1da177e4 1764 if (map_word_bitsset(map, status, CMD(0x3a))) {
4843653c 1765 unsigned long chipstatus = MERGESTATUS(status);
1766
1767 /* Reset the error bits */
1768 map_write(map, CMD(0x50), adr);
1769 map_write(map, CMD(0x70), adr);
1770 xip_enable(map, chip, adr);
1771
1da177e4 1772 if ((chipstatus & 0x30) == 0x30) {
1773 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1774 ret = -EINVAL;
1775 } else if (chipstatus & 0x02) {
1776 /* Protection bit set */
1777 ret = -EROFS;
1778 } else if (chipstatus & 0x8) {
1779 /* Voltage */
4843653c 1780 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1da177e4 1781 ret = -EIO;
1782 } else if (chipstatus & 0x20 && retries--) {
1783 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1784 put_chip(map, chip, adr);
1785 spin_unlock(chip->mutex);
1786 goto retry;
1787 } else {
1788 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1789 ret = -EIO;
1790 }
1791
1792 goto out;
1793 }
1794
4843653c 1795 xip_enable(map, chip, adr);
1796 out: put_chip(map, chip, adr);
1797 spin_unlock(chip->mutex);
1798 return ret;
1799}
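/*
 * [Editorial sketch — not part of the driver] The error ladder in
 * do_erase_oneblock() maps merged status-register bits to errno values.
 * A stand-alone decoder with the same branch order; the sample status
 * words are made up for illustration:
 */
#include <stdio.h>

static const char *erase_error(unsigned long chipstatus)
{
	if ((chipstatus & 0x30) == 0x30)
		return "bad command sequence (-EINVAL)";
	if (chipstatus & 0x02)
		return "block locked (-EROFS)";
	if (chipstatus & 0x08)
		return "bad VPP (-EIO)";
	if (chipstatus & 0x20)
		return "erase failure (retry, then -EIO)";
	return "no error bits set";
}

int main(void)
{
	unsigned long samples[] = { 0x80, 0xb0, 0x82, 0x88, 0xa0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%02lx -> %s\n", samples[i], erase_error(samples[i]));
	return 0;
}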
1800
029a9eb1 1801static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1802{
1803 unsigned long ofs, len;
1804 int ret;
1805
1806 ofs = instr->addr;
1807 len = instr->len;
1808
1809 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1810 if (ret)
1811 return ret;
1812
1813 instr->state = MTD_ERASE_DONE;
1814 mtd_erase_callback(instr);
1f948b43 1815
1816 return 0;
1817}
1818
1819static void cfi_intelext_sync (struct mtd_info *mtd)
1820{
1821 struct map_info *map = mtd->priv;
1822 struct cfi_private *cfi = map->fldrv_priv;
1823 int i;
1824 struct flchip *chip;
1825 int ret = 0;
1826
1827 for (i=0; !ret && i<cfi->numchips; i++) {
1828 chip = &cfi->chips[i];
1829
1830 spin_lock(chip->mutex);
1831 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1832
1833 if (!ret) {
1834 chip->oldstate = chip->state;
1835 chip->state = FL_SYNCING;
1f948b43 1836 /* No need to wake_up() on this state change -
1837 * as the whole point is that nobody can do anything
1838 * with the chip now anyway.
1839 */
1840 }
1841 spin_unlock(chip->mutex);
1842 }
1843
1844 /* Unlock the chips again */
1845
1846 for (i--; i >=0; i--) {
1847 chip = &cfi->chips[i];
1848
1849 spin_lock(chip->mutex);
1f948b43 1850
1851 if (chip->state == FL_SYNCING) {
1852 chip->state = chip->oldstate;
09c79335 1853 chip->oldstate = FL_READY;
1854 wake_up(&chip->wq);
1855 }
1856 spin_unlock(chip->mutex);
1857 }
1858}
1859
0ecbc81a 1860static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1861 struct flchip *chip,
1862 unsigned long adr,
1863 int len, void *thunk)
1864{
1865 struct cfi_private *cfi = map->fldrv_priv;
1866 int status, ofs_factor = cfi->interleave * cfi->device_type;
1867
c25bb1f5 1868 adr += chip->start;
1da177e4 1869 xip_disable(map, chip, adr+(2*ofs_factor));
c25bb1f5 1870 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1871 chip->state = FL_JEDEC_QUERY;
1872 status = cfi_read_query(map, adr+(2*ofs_factor));
1873 xip_enable(map, chip, 0);
1874 return status;
1875}
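/*
 * [Editorial sketch — not part of the driver] do_getlockstatus_oneblock()
 * reads word offset 2 of the block's identifier space (entered with the
 * 0x90 command), and every word offset must be scaled by
 * interleave * device_type to become a byte address.  Stand-alone
 * arithmetic for a hypothetical geometry:
 */
#include <stdio.h>

int main(void)
{
	/* two x16 chips interleaved on a 32-bit bus (assumed values) */
	unsigned long interleave = 2, device_type = 2;
	unsigned long ofs_factor = interleave * device_type;
	unsigned long block_adr = 0x20000;	/* some erase-block base */

	printf("lock status word read at 0x%08lx\n",
	       block_adr + 2 * ofs_factor);
	return 0;
}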
1876
1877#ifdef DEBUG_LOCK_BITS
1878static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1879 struct flchip *chip,
1880 unsigned long adr,
1881 int len, void *thunk)
1882{
1da177e4 1883 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
0ecbc81a 1884 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1885 return 0;
1886}
1887#endif
1888
1889#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1890#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1891
1892static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1893 unsigned long adr, int len, void *thunk)
1894{
1895 struct cfi_private *cfi = map->fldrv_priv;
9a6e73ec 1896 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
c172471b 1897 int udelay;
1898 int ret;
1899
1900 adr += chip->start;
1901
1902 spin_lock(chip->mutex);
1903 ret = get_chip(map, chip, adr, FL_LOCKING);
1904 if (ret) {
1905 spin_unlock(chip->mutex);
1906 return ret;
1907 }
1908
1909 ENABLE_VPP(map);
1910 xip_disable(map, chip, adr);
1f948b43 1911
1912 map_write(map, CMD(0x60), adr);
1913 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1914 map_write(map, CMD(0x01), adr);
1915 chip->state = FL_LOCKING;
1916 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1917 map_write(map, CMD(0xD0), adr);
1918 chip->state = FL_UNLOCKING;
1919 } else
1920 BUG();
1921
1922 /*
1923 * If Instant Individual Block Locking is supported, there is
1924 * no need to delay.
1925 */
c172471b 1926 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
9a6e73ec 1927
1928 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1929 if (ret) {
1930 map_write(map, CMD(0x70), adr);
1931 chip->state = FL_STATUS;
1932 xip_enable(map, chip, adr);
1933 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1934 goto out;
1da177e4 1935 }
1f948b43 1936
1da177e4 1937 xip_enable(map, chip, adr);
c172471b 1938out: put_chip(map, chip, adr);
1da177e4 1939 spin_unlock(chip->mutex);
c172471b 1940 return ret;
1941}
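/*
 * [Editorial sketch — not part of the driver] The udelay chosen in
 * do_xxlock_oneblock() is one timer tick's worth of microseconds
 * (1000000/HZ) unless the chip advertises instant individual block
 * locking in FeatureSupport bit 5.  Stand-alone rendering with a
 * hypothetical HZ:
 */
#include <stdio.h>

#define HZ 250				/* hypothetical tick rate */
#define INSTANT_LOCK (1 << 5)		/* FeatureSupport bit 5 */

int main(void)
{
	unsigned int feature_support = 0;	/* no instant locking */
	int udelay;

	udelay = !(feature_support & INSTANT_LOCK) ? 1000000 / HZ : 0;
	printf("lock/unlock poll budget: %d us\n", udelay);
	return 0;
}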
1942
1943static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1944{
1945 int ret;
1946
1947#ifdef DEBUG_LOCK_BITS
1948 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1949 __FUNCTION__, ofs, len);
1950 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1da1caf8 1951 ofs, len, NULL);
1952#endif
1953
1f948b43 1954 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1da177e4 1955 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1f948b43 1956
1957#ifdef DEBUG_LOCK_BITS
1958 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1959 __FUNCTION__, ret);
1960 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1da1caf8 1961 ofs, len, NULL);
1962#endif
1963
1964 return ret;
1965}
1966
1967static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1968{
1969 int ret;
1970
1971#ifdef DEBUG_LOCK_BITS
1972 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1973 __FUNCTION__, ofs, len);
1974 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1da1caf8 1975 ofs, len, NULL);
1976#endif
1977
1978 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1979 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1f948b43 1980
1981#ifdef DEBUG_LOCK_BITS
1982 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1983 __FUNCTION__, ret);
1f948b43 1984 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1da1caf8 1985 ofs, len, NULL);
1da177e4 1986#endif
1f948b43 1987
1988 return ret;
1989}
1990
1991#ifdef CONFIG_MTD_OTP
1992
1f948b43 1993typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1994 u_long data_offset, u_char *buf, u_int size,
1995 u_long prot_offset, u_int groupno, u_int groupsize);
1996
1997static int __xipram
1998do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1999 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2000{
2001 struct cfi_private *cfi = map->fldrv_priv;
2002 int ret;
2003
2004 spin_lock(chip->mutex);
2005 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2006 if (ret) {
2007 spin_unlock(chip->mutex);
2008 return ret;
2009 }
2010
2011 /* let's ensure we're not reading back cached data from array mode */
6da70124 2012 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2013
2014 xip_disable(map, chip, chip->start);
2015 if (chip->state != FL_JEDEC_QUERY) {
2016 map_write(map, CMD(0x90), chip->start);
2017 chip->state = FL_JEDEC_QUERY;
2018 }
2019 map_copy_from(map, buf, chip->start + offset, size);
2020 xip_enable(map, chip, chip->start);
2021
2022 /* then ensure we don't keep OTP data in the cache */
6da70124 2023 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2024
2025 put_chip(map, chip, chip->start);
2026 spin_unlock(chip->mutex);
2027 return 0;
2028}
2029
2030static int
2031do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2032 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2033{
2034 int ret;
2035
2036 while (size) {
2037 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2038 int gap = offset - bus_ofs;
2039 int n = min_t(int, size, map_bankwidth(map)-gap);
2040 map_word datum = map_word_ff(map);
2041
2042 datum = map_word_load_partial(map, datum, buf, gap, n);
2043 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1f948b43 2044 if (ret)
2045 return ret;
2046
2047 offset += n;
2048 buf += n;
2049 size -= n;
2050 }
2051
2052 return 0;
2053}
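/*
 * [Editorial sketch — not part of the driver] do_otp_write() splits an
 * unaligned write into bus-word-sized pieces: align down to bus_ofs,
 * pad the leading gap with 0xff (which programs nothing), and load at
 * most one bus word of payload per iteration.  The same split in
 * stand-alone C with a hypothetical 4-byte bank width:
 */
#include <stdio.h>

#define BANKWIDTH 4UL			/* stand-in for map_bankwidth(map) */

int main(void)
{
	unsigned long offset = 5, size = 9;	/* unaligned OTP write */

	while (size) {
		unsigned long bus_ofs = offset & ~(BANKWIDTH - 1);
		unsigned long gap = offset - bus_ofs;
		unsigned long n = BANKWIDTH - gap;

		if (n > size)
			n = size;
		printf("word @%lu: pad %lu byte(s), program %lu byte(s)\n",
		       bus_ofs, gap, n);
		offset += n;
		size -= n;
	}
	return 0;
}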
2054
2055static int
2056do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2057 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2058{
2059 struct cfi_private *cfi = map->fldrv_priv;
2060 map_word datum;
2061
2062 /* make sure area matches group boundaries */
332d71f7 2063 if (size != grpsz)
2064 return -EXDEV;
2065
2066 datum = map_word_ff(map);
2067 datum = map_word_clr(map, datum, CMD(1 << grpno));
2068 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2069}
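/*
 * [Editorial sketch — not part of the driver] do_otp_lock() locks a
 * protection group by clearing that group's bit in the lock word (a 0
 * bit means "locked"), which is what map_word_clr() with CMD(1 << grpno)
 * achieves.  The single-chip equivalent in plain C:
 */
#include <stdio.h>

int main(void)
{
	unsigned int lockword = 0xff;	/* all groups still unlocked */
	unsigned int grpno = 3;		/* illustrative group number */

	lockword &= ~(1u << grpno);
	printf("lock word after locking group %u: 0x%02x\n",
	       grpno, lockword);
	return 0;
}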
2070
2071static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2072 size_t *retlen, u_char *buf,
2073 otp_op_t action, int user_regs)
2074{
2075 struct map_info *map = mtd->priv;
2076 struct cfi_private *cfi = map->fldrv_priv;
2077 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2078 struct flchip *chip;
2079 struct cfi_intelext_otpinfo *otp;
2080 u_long devsize, reg_prot_offset, data_offset;
2081 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2082 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2083 int ret;
2084
2085 *retlen = 0;
2086
2087 /* Check that we actually have some OTP registers */
2088 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2089 return -ENODATA;
2090
2091 /* we need real chips here, not virtual ones */
2092 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2093 chip_step = devsize >> cfi->chipshift;
2094 chip_num = 0;
2095
2096 /* Some chips have OTP located in the _top_ partition only.
2097 For example: Intel 28F256L18T (T means top-parameter device) */
2098 if (cfi->mfr == MANUFACTURER_INTEL) {
2099 switch (cfi->id) {
2100 case 0x880b:
2101 case 0x880c:
2102 case 0x880d:
2103 chip_num = chip_step - 1;
2104 }
2105 }
f77814dd 2106
dce2b4da 2107 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2108 chip = &cfi->chips[chip_num];
2109 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2110
2111 /* first OTP region */
2112 field = 0;
2113 reg_prot_offset = extp->ProtRegAddr;
2114 reg_fact_groups = 1;
2115 reg_fact_size = 1 << extp->FactProtRegSize;
2116 reg_user_groups = 1;
2117 reg_user_size = 1 << extp->UserProtRegSize;
2118
2119 while (len > 0) {
2120 /* flash geometry fixup */
2121 data_offset = reg_prot_offset + 1;
2122 data_offset *= cfi->interleave * cfi->device_type;
2123 reg_prot_offset *= cfi->interleave * cfi->device_type;
2124 reg_fact_size *= cfi->interleave;
2125 reg_user_size *= cfi->interleave;
2126
2127 if (user_regs) {
2128 groups = reg_user_groups;
2129 groupsize = reg_user_size;
2130 /* skip over factory reg area */
2131 groupno = reg_fact_groups;
2132 data_offset += reg_fact_groups * reg_fact_size;
2133 } else {
2134 groups = reg_fact_groups;
2135 groupsize = reg_fact_size;
2136 groupno = 0;
2137 }
2138
332d71f7 2139 while (len > 0 && groups > 0) {
2140 if (!action) {
2141 /*
2142 * Special case: if action is NULL
2143 * we fill buf with otp_info records.
2144 */
2145 struct otp_info *otpinfo;
2146 map_word lockword;
2147 len -= sizeof(struct otp_info);
2148 if (len <= 0)
2149 return -ENOSPC;
2150 ret = do_otp_read(map, chip,
2151 reg_prot_offset,
2152 (u_char *)&lockword,
2153 map_bankwidth(map),
2154 0, 0, 0);
2155 if (ret)
2156 return ret;
2157 otpinfo = (struct otp_info *)buf;
2158 otpinfo->start = from;
2159 otpinfo->length = groupsize;
2160 otpinfo->locked =
2161 !map_word_bitsset(map, lockword,
2162 CMD(1 << groupno));
2163 from += groupsize;
2164 buf += sizeof(*otpinfo);
2165 *retlen += sizeof(*otpinfo);
2166 } else if (from >= groupsize) {
2167 from -= groupsize;
332d71f7 2168 data_offset += groupsize;
2169 } else {
2170 int size = groupsize;
2171 data_offset += from;
2172 size -= from;
2173 from = 0;
2174 if (size > len)
2175 size = len;
2176 ret = action(map, chip, data_offset,
2177 buf, size, reg_prot_offset,
2178 groupno, groupsize);
2179 if (ret < 0)
2180 return ret;
2181 buf += size;
2182 len -= size;
2183 *retlen += size;
332d71f7 2184 data_offset += size;
2185 }
2186 groupno++;
2187 groups--;
2188 }
2189
2190 /* next OTP region */
2191 if (++field == extp->NumProtectionFields)
2192 break;
2193 reg_prot_offset = otp->ProtRegAddr;
2194 reg_fact_groups = otp->FactGroups;
2195 reg_fact_size = 1 << otp->FactProtRegSize;
2196 reg_user_groups = otp->UserGroups;
2197 reg_user_size = 1 << otp->UserProtRegSize;
2198 otp++;
2199 }
2200 }
2201
2202 return 0;
2203}
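/*
 * [Editorial sketch — not part of the driver] The "flash geometry
 * fixup" in cfi_intelext_otp_walk() turns word offsets from the
 * extended query into byte offsets by scaling with
 * interleave * device_type; the register data begins one word after
 * the protection-lock word.  Stand-alone arithmetic with hypothetical
 * query values:
 */
#include <stdio.h>

int main(void)
{
	unsigned long interleave = 1, device_type = 2;	/* one x16 chip */
	unsigned long reg_prot_offset = 0x80;		/* ProtRegAddr */
	unsigned long reg_fact_size = 1UL << 2;	/* 1 << FactProtRegSize */
	unsigned long reg_user_size = 1UL << 2;	/* 1 << UserProtRegSize */
	unsigned long data_offset;

	data_offset = (reg_prot_offset + 1) * interleave * device_type;
	reg_prot_offset *= interleave * device_type;
	reg_fact_size *= interleave;
	reg_user_size *= interleave;

	printf("lock word @0x%lx, %lu factory + %lu user bytes @0x%lx\n",
	       reg_prot_offset, reg_fact_size, reg_user_size, data_offset);
	return 0;
}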
2204
2205static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2206 size_t len, size_t *retlen,
2207 u_char *buf)
2208{
2209 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2210 buf, do_otp_read, 0);
2211}
2212
2213static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2214 size_t len, size_t *retlen,
2215 u_char *buf)
2216{
2217 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2218 buf, do_otp_read, 1);
2219}
2220
2221static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2222 size_t len, size_t *retlen,
2223 u_char *buf)
2224{
2225 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2226 buf, do_otp_write, 1);
2227}
2228
2229static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2230 loff_t from, size_t len)
2231{
2232 size_t retlen;
2233 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2234 NULL, do_otp_lock, 1);
2235}
2236
1f948b43 2237static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2238 struct otp_info *buf, size_t len)
2239{
2240 size_t retlen;
2241 int ret;
2242
2243 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2244 return ret ? : retlen;
2245}
2246
2247static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2248 struct otp_info *buf, size_t len)
2249{
2250 size_t retlen;
2251 int ret;
2252
2253 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2254 return ret ? : retlen;
2255}
2256
2257#endif
2258
2259static void cfi_intelext_save_locks(struct mtd_info *mtd)
2260{
2261 struct mtd_erase_region_info *region;
2262 int block, status, i;
2263 unsigned long adr;
2264 size_t len;
2265
2266 for (i = 0; i < mtd->numeraseregions; i++) {
2267 region = &mtd->eraseregions[i];
2268 if (!region->lockmap)
2269 continue;
2270
2271 for (block = 0; block < region->numblocks; block++) {
2272 len = region->erasesize;
2273 adr = region->offset + block * len;
2274
2275 status = cfi_varsize_frob(mtd,
029a9eb1 2276 do_getlockstatus_oneblock, adr, len, NULL);
2277 if (status)
2278 set_bit(block, region->lockmap);
2279 else
2280 clear_bit(block, region->lockmap);
2281 }
2282 }
2283}
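/*
 * [Editorial sketch — not part of the driver] cfi_intelext_save_locks()
 * records one bit per erase block, and the resume path later unlocks
 * only the blocks whose bit is clear.  A round trip of the same idea
 * with a plain bitmap standing in for region->lockmap and the
 * set_bit()/test_bit() helpers:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long lockmap = 0;	/* one bit per erase block */
	int block;

	/* suspend path: blocks 5 and 6 happened to be locked */
	lockmap |= 1ULL << 5;
	lockmap |= 1ULL << 6;

	/* resume path: unlock everything recorded as unlocked */
	for (block = 0; block < 8; block++)
		if (!(lockmap & (1ULL << block)))
			printf("block %d: unlock on resume\n", block);
	return 0;
}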
2284
2285static int cfi_intelext_suspend(struct mtd_info *mtd)
2286{
2287 struct map_info *map = mtd->priv;
2288 struct cfi_private *cfi = map->fldrv_priv;
0ecbc81a 2289 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2290 int i;
2291 struct flchip *chip;
2292 int ret = 0;
2293
2294 if ((mtd->flags & MTD_STUPID_LOCK)
2295 && extp && (extp->FeatureSupport & (1 << 5)))
2296 cfi_intelext_save_locks(mtd);
2297
2298 for (i=0; !ret && i<cfi->numchips; i++) {
2299 chip = &cfi->chips[i];
2300
2301 spin_lock(chip->mutex);
2302
2303 switch (chip->state) {
2304 case FL_READY:
2305 case FL_STATUS:
2306 case FL_CFI_QUERY:
2307 case FL_JEDEC_QUERY:
2308 if (chip->oldstate == FL_READY) {
2309 /* place the chip in a known state before suspend */
2310 map_write(map, CMD(0xFF), cfi->chips[i].start);
2311 chip->oldstate = chip->state;
2312 chip->state = FL_PM_SUSPENDED;
1f948b43 2313 /* No need to wake_up() on this state change -
2314 * as the whole point is that nobody can do anything
2315 * with the chip now anyway.
2316 */
2317 } else {
2318 /* There seems to be an operation pending. We must wait for it. */
2319 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2320 ret = -EAGAIN;
2321 }
2322 break;
2323 default:
2324 /* Should we actually wait? Once upon a time these routines weren't
2325 allowed to. Or should we return -EAGAIN, because the upper layers
2326 ought to have already shut down anything which was using the device
2327 anyway? The latter for now. */
2328 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2329 ret = -EAGAIN;
2330 case FL_PM_SUSPENDED:
2331 break;
2332 }
2333 spin_unlock(chip->mutex);
2334 }
2335
2336 /* Unlock the chips again */
2337
2338 if (ret) {
2339 for (i--; i >=0; i--) {
2340 chip = &cfi->chips[i];
1f948b43 2341
1da177e4 2342 spin_lock(chip->mutex);
1f948b43 2343
2344 if (chip->state == FL_PM_SUSPENDED) {
2345 /* No need to force it into a known state here,
2346 because we're returning failure, and it didn't
2347 get power cycled */
2348 chip->state = chip->oldstate;
2349 chip->oldstate = FL_READY;
2350 wake_up(&chip->wq);
2351 }
2352 spin_unlock(chip->mutex);
2353 }
2354 }
2355
2356 return ret;
2357}
2358
2359static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2360{
2361 struct mtd_erase_region_info *region;
2362 int block, i;
2363 unsigned long adr;
2364 size_t len;
2365
2366 for (i = 0; i < mtd->numeraseregions; i++) {
2367 region = &mtd->eraseregions[i];
2368 if (!region->lockmap)
2369 continue;
2370
2371 for (block = 0; block < region->numblocks; block++) {
2372 len = region->erasesize;
2373 adr = region->offset + block * len;
2374
2375 if (!test_bit(block, region->lockmap))
2376 cfi_intelext_unlock(mtd, adr, len);
2377 }
2378 }
2379}
2380
2381static void cfi_intelext_resume(struct mtd_info *mtd)
2382{
2383 struct map_info *map = mtd->priv;
2384 struct cfi_private *cfi = map->fldrv_priv;
0ecbc81a 2385 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2386 int i;
2387 struct flchip *chip;
2388
2389 for (i=0; i<cfi->numchips; i++) {
1f948b43 2390
2391 chip = &cfi->chips[i];
2392
2393 spin_lock(chip->mutex);
1f948b43 2394
2395 /* Go to known state. Chip may have been power cycled */
2396 if (chip->state == FL_PM_SUSPENDED) {
2397 map_write(map, CMD(0xFF), cfi->chips[i].start);
2398 chip->oldstate = chip->state = FL_READY;
2399 wake_up(&chip->wq);
2400 }
2401
2402 spin_unlock(chip->mutex);
2403 }
2404
2405 if ((mtd->flags & MTD_STUPID_LOCK)
2406 && extp && (extp->FeatureSupport & (1 << 5)))
2407 cfi_intelext_restore_locks(mtd);
2408}
2409
2410static int cfi_intelext_reset(struct mtd_info *mtd)
2411{
2412 struct map_info *map = mtd->priv;
2413 struct cfi_private *cfi = map->fldrv_priv;
2414 int i, ret;
2415
2416 for (i=0; i < cfi->numchips; i++) {
2417 struct flchip *chip = &cfi->chips[i];
2418
2419 /* force the completion of any ongoing operation
1f948b43 2420 and switch to array mode so any bootloader in
2421 flash is accessible for soft reboot. */
2422 spin_lock(chip->mutex);
c4a9f88d 2423 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2424 if (!ret) {
2425 map_write(map, CMD(0xff), chip->start);
c4a9f88d 2426 chip->state = FL_SHUTDOWN;
2427 }
2428 spin_unlock(chip->mutex);
2429 }
2430
2431 return 0;
2432}
2433
2434static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2435 void *v)
2436{
2437 struct mtd_info *mtd;
2438
2439 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2440 cfi_intelext_reset(mtd);
2441 return NOTIFY_DONE;
2442}
2443
2444static void cfi_intelext_destroy(struct mtd_info *mtd)
2445{
2446 struct map_info *map = mtd->priv;
2447 struct cfi_private *cfi = map->fldrv_priv;
2448 struct mtd_erase_region_info *region;
2449 int i;
2450 cfi_intelext_reset(mtd);
2451 unregister_reboot_notifier(&mtd->reboot_notifier);
2452 kfree(cfi->cmdset_priv);
2453 kfree(cfi->cfiq);
2454 kfree(cfi->chips[0].priv);
2455 kfree(cfi);
2456 for (i = 0; i < mtd->numeraseregions; i++) {
2457 region = &mtd->eraseregions[i];
2458 if (region->lockmap)
2459 kfree(region->lockmap);
2460 }
2461 kfree(mtd->eraseregions);
2462}
2463
2464MODULE_LICENSE("GPL");
2465MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2466MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2467MODULE_ALIAS("cfi_cmdset_0003");
2468MODULE_ALIAS("cfi_cmdset_0200");