/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
#define AT49BV640D	0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 * *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
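/*
 * point()/unpoint() provide zero-copy access: with a linearly mapped
 * NOR chip the MTD user can read the flash contents in place instead
 * of copying through a buffer, so the handlers are wired up whenever
 * the map allows it.
 */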
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

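	/*
	 * The extended query table carries several variable-length tails
	 * (OTP fields, burst read info, per-region partition info), so we
	 * cannot know its full size up front: read the fixed part first,
	 * size the tail from what it says, and re-read the whole table
	 * whenever the current buffer turns out to be too small.
	 */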
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
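		/*
		 * The CFI query encodes typical operation times as powers
		 * of two: 2^n µs for word/buffer writes and 2^n ms for
		 * block erases (hence the 1000<< below); the *Max fields
		 * give the worst case as a further power-of-two multiple
		 * of the typical value.
		 */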
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
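/*
 * Command sets 0x0003 and 0x0200 are variants of the Intel/Sharp set
 * that this driver already copes with; where they differ, the code
 * tests cfi->cfiq->P_ID explicitly (e.g. program command 0x41/0xe9
 * instead of 0x40/0xe8 for 0x0200), so the probe entry points can
 * simply alias cfi_cmdset_0001.
 */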
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
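		/*
		 * Per the CFI spec each EraseRegionInfo word packs the
		 * block count minus one into bits 0-15 and the block size
		 * in 256-byte units into bits 16-31; (x >> 8) & ~0xff
		 * below is just (x >> 16) << 8, i.e. the size in bytes.
		 */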
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
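		/*
		 * E.g. a 16 MiB chip (chipshift 24) advertising 4 hardware
		 * partitions gets partshift 22, i.e. four 4 MiB virtual
		 * chips; __ffs() is log2 here since numparts is expected
		 * to be a power of two.
		 */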
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 * *********** CHIP ACCESS FUNCTIONS ***********
 */
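
/*
 * Quick reference for the Intel/Sharp opcodes and status bits used by
 * the functions below (summarised from the common StrataFlash
 * documentation; the chip datasheet remains authoritative):
 *
 *   commands:  0xff read array, 0x70 read status, 0x50 clear status,
 *              0x40/0x41 word program, 0xe8/0xe9 write to buffer,
 *              0x20 block erase, 0xd0 confirm/resume, 0xb0 suspend,
 *              0xc0 OTP program
 *
 *   status:    SR.7 (0x80) ready, SR.5 (0x20) erase error,
 *              SR.4 (0x10) program error, SR.3 (0x08) VPP low,
 *              SR.1 (0x02) block locked, SR.0 (0x01) partition busy
 */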
719 */
5a37cf19 720static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
1da177e4
LT
721{
722 DECLARE_WAITQUEUE(wait, current);
723 struct cfi_private *cfi = map->fldrv_priv;
724 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
1da177e4 725 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
5a37cf19 726 unsigned long timeo = jiffies + HZ;
1da177e4 727
3afe7eb3
AB
728 /* Prevent setting state FL_SYNCING for chip in suspended state. */
729 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
730 goto sleep;
731
1da177e4
LT
732 switch (chip->state) {
733
734 case FL_STATUS:
735 for (;;) {
736 status = map_read(map, adr);
737 if (map_word_andequal(map, status, status_OK, status_OK))
738 break;
739
740 /* At this point we're fine with write operations
741 in other partitions as they don't conflict. */
742 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
743 break;
744
1da177e4
LT
745 spin_unlock(chip->mutex);
746 cfi_udelay(1);
747 spin_lock(chip->mutex);
748 /* Someone else might have been playing with it. */
5a37cf19 749 return -EAGAIN;
1da177e4 750 }
fb6d080c 751 /* Fall through */
1da177e4
LT
752 case FL_READY:
753 case FL_CFI_QUERY:
754 case FL_JEDEC_QUERY:
755 return 0;
756
757 case FL_ERASING:
758 if (!cfip ||
759 !(cfip->FeatureSupport & 2) ||
760 !(mode == FL_READY || mode == FL_POINT ||
761 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
762 goto sleep;
763
764
765 /* Erase suspend */
766 map_write(map, CMD(0xB0), adr);
767
768 /* If the flash has finished erasing, then 'erase suspend'
769 * appears to make some (28F320) flash devices switch to
770 * 'read' mode. Make sure that we switch to 'read status'
771 * mode so we get the right data. --rmk
772 */
773 map_write(map, CMD(0x70), adr);
774 chip->oldstate = FL_ERASING;
775 chip->state = FL_ERASE_SUSPENDING;
776 chip->erase_suspended = 1;
777 for (;;) {
778 status = map_read(map, adr);
779 if (map_word_andequal(map, status, status_OK, status_OK))
780 break;
781
782 if (time_after(jiffies, timeo)) {
783 /* Urgh. Resume and pretend we weren't here. */
784 map_write(map, CMD(0xd0), adr);
785 /* Make sure we're in 'read status' mode if it had finished */
786 map_write(map, CMD(0x70), adr);
787 chip->state = FL_ERASING;
788 chip->oldstate = FL_READY;
4843653c
NP
789 printk(KERN_ERR "%s: Chip not ready after erase "
790 "suspended: status = 0x%lx\n", map->name, status.x[0]);
1da177e4
LT
791 return -EIO;
792 }
793
794 spin_unlock(chip->mutex);
795 cfi_udelay(1);
796 spin_lock(chip->mutex);
797 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
798 So we can just loop here. */
799 }
800 chip->state = FL_STATUS;
801 return 0;
802
803 case FL_XIP_WHILE_ERASING:
804 if (mode != FL_READY && mode != FL_POINT &&
805 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
806 goto sleep;
807 chip->oldstate = chip->state;
808 chip->state = FL_READY;
809 return 0;
810
fb6d080c
AK
811 case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				spin_unlock(contender->mutex);
				goto retry;
			}
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* A suspend occurred while we slept: reset the timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
			chip->write_suspended = 0;
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
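	/*
	 * CMD(0x1a) selects SR.4 (program error), SR.3 (VPP low) and
	 * SR.1 (block locked); MERGESTATUS folds the status bytes of an
	 * interleaved bus into one value so each failure cause can be
	 * tested once below.
	 */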
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
1686
1da177e4 1687 /* Write length of data to come */
e102d54a 1688 map_write(map, CMD(words), cmd_adr );
1da177e4
LT
1689
1690 /* Write data */
e102d54a
NP
1691 vec = *pvec;
1692 vec_seek = *pvec_seek;
1693 do {
1694 int n = map_bankwidth(map) - word_gap;
1695 if (n > vec->iov_len - vec_seek)
1696 n = vec->iov_len - vec_seek;
1697 if (n > len)
1698 n = len;
1da177e4 1699
e102d54a
NP
1700 if (!word_gap && len < map_bankwidth(map))
1701 datum = map_word_ff(map);
1da177e4 1702
e102d54a 1703 datum = map_word_load_partial(map, datum,
1f948b43 1704 vec->iov_base + vec_seek,
e102d54a 1705 word_gap, n);
1da177e4 1706
e102d54a
NP
1707 len -= n;
1708 word_gap += n;
1709 if (!len || word_gap == map_bankwidth(map)) {
1710 map_write(map, datum, adr);
1711 adr += map_bankwidth(map);
1712 word_gap = 0;
1713 }
1da177e4 1714
e102d54a
NP
1715 vec_seek += n;
1716 if (vec_seek == vec->iov_len) {
1717 vec++;
1718 vec_seek = 0;
1719 }
1720 } while (len);
1721 *pvec = vec;
1722 *pvec_seek = vec_seek;
1da177e4
LT
1723
1724 /* GO GO GO */
1725 map_write(map, CMD(0xd0), cmd_adr);
1726 chip->state = FL_WRITING;
1727
c172471b 1728 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
646fd127 1729 initial_adr, initial_len,
e93cafe4
AG
1730 chip->buffer_write_time,
1731 chip->buffer_write_time_max);
c172471b
NP
1732 if (ret) {
1733 map_write(map, CMD(0x70), cmd_adr);
1734 chip->state = FL_STATUS;
1735 xip_enable(map, chip, cmd_adr);
1736 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1737 goto out;
1da177e4 1738 }
1da177e4 1739
4843653c 1740 /* check for errors */
c172471b 1741 status = map_read(map, cmd_adr);
4843653c
NP
1742 if (map_word_bitsset(map, status, CMD(0x1a))) {
1743 unsigned long chipstatus = MERGESTATUS(status);
1744
1745 /* reset status */
1da177e4 1746 map_write(map, CMD(0x50), cmd_adr);
4843653c
NP
1747 map_write(map, CMD(0x70), cmd_adr);
1748 xip_enable(map, chip, cmd_adr);
1749
1750 if (chipstatus & 0x02) {
1751 ret = -EROFS;
1752 } else if (chipstatus & 0x08) {
1753 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1754 ret = -EIO;
1755 } else {
1756 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1757 ret = -EINVAL;
1758 }
1759
1760 goto out;
1da177e4
LT
1761 }
1762
1763 xip_enable(map, chip, cmd_adr);
1764 out: put_chip(map, chip, cmd_adr);
1765 spin_unlock(chip->mutex);
1766 return ret;
1767}
1768
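/*
 * Illustrative sketch (editor's addition, not driver code): the word
 * count arithmetic in do_write_buffer() above. The chip is given the
 * number of words minus one, and an unaligned start is padded back to
 * a bus boundary with 0xFF filler so cells outside the request stay
 * unprogrammed (programming can only clear bits). Values are assumed.
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long adr = 0x1002, bankwidth = 4;	/* assumed */
	int len = 10;					/* assumed */

	int word_gap = (-adr & (bankwidth - 1));	/* bytes to next boundary */
	int words = DIV_ROUND_UP(len - word_gap, bankwidth);

	if (!word_gap) {
		words--;			/* chip expects count minus 1 */
	} else {
		word_gap = bankwidth - word_gap; /* bytes of 0xFF padding */
		adr -= word_gap;		 /* realign start downwards */
	}
	/* prints: start 0x1000, pad 2 byte(s), count field 2 (= 3 words) */
	printf("start 0x%lx, pad %d byte(s), count field %d\n",
	       adr, word_gap, words);
	return 0;
}
#endif
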
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

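/*
 * Illustrative sketch (editor's addition, not driver code):
 * cfi_intelext_writev() never lets a single buffer program cross a
 * write-buffer block boundary. With an assumed 32-byte buffer, a
 * write at offset 0x1c of length 40 splits into 4, 32 and 4 bytes.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ofs = 0x1c;	/* assumed start */
	long len = 40;			/* assumed length */
	int wbufsize = 32;		/* assumed buffer size */

	while (len) {
		int size = wbufsize - (ofs & (wbufsize - 1));

		if (size > len)
			size = len;
		printf("program %d byte(s) at 0x%lx\n", size, ofs);
		ofs += size;
		len -= size;
	}
	return 0;
}
#endif
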
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

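/*
 * Illustrative sketch (editor's addition, not driver code): the
 * status-register triage done after an erase, with the same bit
 * meanings the driver checks above (SR.1 block locked, SR.3 VPP low,
 * SR.4+SR.5 together a bad command sequence, SR.5 alone an erase
 * failure, which the driver retries). Error numbers mirror its choices.
 */
#if 0
#include <stdio.h>
#include <errno.h>

static int decode_erase_status(unsigned long sr)
{
	if (!(sr & 0x3a))
		return 0;		/* clean erase */
	if ((sr & 0x30) == 0x30)
		return -EINVAL;		/* bad command sequence */
	if (sr & 0x02)
		return -EROFS;		/* block is locked */
	if (sr & 0x08)
		return -EIO;		/* VPP out of range */
	return -EIO;			/* plain erase failure (retryable) */
}

int main(void)
{
	/* prints: 0 -30 -22 (ready / locked / bad sequence) */
	printf("%d %d %d\n", decode_erase_status(0x80),
	       decode_erase_status(0x82), decode_erase_status(0xb0));
	return 0;
}
#endif
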
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

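/*
 * Illustrative sketch (editor's addition, not driver code): why the
 * lock status is read at adr + 2*ofs_factor above. After CMD(0x90)
 * the block's identification words appear at word offsets from its
 * base, and a word offset becomes a bus address by scaling with
 * interleave * device_type (bytes per query word on this bus); the
 * driver reads the lock word at offset 2. Values below are assumed.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int interleave = 2;			/* two chips side by side (assumed) */
	int device_type = 2;			/* 16-bit devices (assumed) */
	unsigned long block_adr = 0x40000;	/* assumed block base */

	int ofs_factor = interleave * device_type;

	printf("lock status word at 0x%lx\n",
	       block_adr + 2 * (unsigned long)ofs_factor);
	return 0;
}
#endif
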
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is
	 * no need to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

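/*
 * Illustrative sketch (editor's addition, not driver code): the
 * 0x60-prefixed lock command pairs and the wait budget chosen in
 * do_xxlock_oneblock() above. Bit 5 of FeatureSupport advertises
 * instant individual block locking; when it is absent the driver
 * polls for up to one jiffy (HZ assumed 100 here) with a 100x
 * timeout ceiling.
 */
#if 0
#include <stdio.h>

#define HZ 100				/* assumed kernel tick rate */

int main(void)
{
	unsigned int feature_support = 0;	/* assumed: no instant locking */
	int instant = feature_support & (1 << 5);
	int udelay = instant ? 0 : 1000000 / HZ;

	printf("setup 0x60 then 0x01 to lock, 0x60 then 0xD0 to unlock\n");
	printf("poll budget %d us, timeout %d us\n", udelay, udelay * 100);
	return 0;
}
#endif
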
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

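/*
 * Illustrative sketch (editor's addition, not driver code): locking
 * OTP group n means programming the protection-lock word with only
 * bit n cleared, as do_otp_lock() does above. Flash programming can
 * only turn 1 bits into 0, so the 0xFF background leaves every other
 * group's lock bit untouched.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int grpno = 3;			/* assumed group number */
	unsigned char datum = 0xff & ~(1 << grpno);

	/* prints: program 0xf7 into the lock word to lock group 3 */
	printf("program 0x%02x into the lock word to lock group %u\n",
	       datum, grpno);
	return 0;
}
#endif
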
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

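/*
 * Illustrative sketch (editor's addition, not driver code): the
 * "flash geometry fixup" in cfi_intelext_otp_walk() above. The
 * extended query reports protection register locations in per-chip
 * word units; multiplying by interleave * device_type turns them
 * into byte offsets on the bus, and region sizes additionally scale
 * with the interleave. The values below are assumptions in the
 * style of a single x16 28F-series part.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long reg_prot_offset = 0x80;	/* assumed, from ext. query */
	int fact_size = 1 << 3, user_size = 1 << 3; /* 8 words each (assumed) */
	int interleave = 1, device_type = 2;	/* one x16 chip (assumed) */

	unsigned long data_offset = (reg_prot_offset + 1)
				    * interleave * device_type;
	unsigned long prot_offset = reg_prot_offset * interleave * device_type;
	int fact_bytes = fact_size * interleave;
	int user_bytes = user_size * interleave;

	printf("lock word at 0x%lx, data from 0x%lx, factory %d / user %d\n",
	       prot_offset, data_offset, fact_bytes, user_bytes);
	return 0;
}
#endif
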
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

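/*
 * Illustrative sketch (editor's addition, not driver code): the
 * per-region lockmap is one bit per erase block. A plain C bitmap
 * shows the save/restore idea: record which blocks are locked before
 * suspend, then on resume unlock only the blocks whose bit is clear,
 * since a power cycle relocks every block on these parts.
 */
#if 0
#include <stdio.h>

#define BLOCKS 64
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long lockmap[(BLOCKS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_block(int b)
{
	lockmap[b / BITS_PER_LONG] |= 1UL << (b % BITS_PER_LONG);
}

static int test_block(int b)
{
	return !!(lockmap[b / BITS_PER_LONG] & (1UL << (b % BITS_PER_LONG)));
}

int main(void)
{
	set_block(5);			/* pretend block 5 was locked */
	for (int b = 0; b < 8; b++)
		if (!test_block(b))
			printf("resume: unlock block %d\n", b);
	return 0;
}
#endif
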
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

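/*
 * Editor's note (illustrative): the notifier above only takes effect
 * because the driver's setup path, outside this excerpt, points
 * mtd->reboot_notifier.notifier_call at cfi_intelext_reboot() and
 * registers it; cfi_intelext_destroy() below does the matching
 * unregister. A minimal sketch of that wiring:
 */
#if 0
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);
#endif
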
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");