/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
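
/*
 * Note: .probe is NULL because this driver is never probed directly;
 * the generic CFI probe identifies the chip and then calls
 * cfi_cmdset_0001() below, which installs this driver via map->fldrv
 * so that .destroy runs when the MTD is torn down.
 */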

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are as
	 * well.  This table picks all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
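
/*
 * How the fixup tables are applied (sketch, based on the cfi_fixup()
 * helper declared in include/linux/mtd/cfi.h): each entry is matched
 * against the probed manufacturer/device ID, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards, and every matching entry's hook is
 * invoked, roughly:
 *
 *	for (f = fixups; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 */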

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
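
/*
 * Layout note (derived from the parsing above): for extended query
 * versions 1.3+ the fixed cfi_pri_intelext structure is followed by a
 * variable-length tail in extp->extra[]: the OTP protection field
 * descriptors, the burst read info (whose last byte gives that block's
 * extra length), the hardware partition count, one regioninfo record
 * per partition region (each with NumBlockTypes block descriptors),
 * and, for 1.4, a programming region record.  Since the total size is
 * only known once the tail has been walked, the function re-reads the
 * whole table with a larger buffer ("goto again") whenever the current
 * buffer turns out to be too small, bailing out above 4096 bytes.
 */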

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
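
/*
 * Typical call path (sketch): a board map driver registers its flash
 * window and runs the generic CFI probe, which identifies the command
 * set and dispatches here, roughly:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_partitions(mtd, parts, nr_parts);
 *
 * where "my_map", "parts" and "nr_parts" stand in for the board
 * driver's own data.  The cfi_cmdset_0003/0200 aliases below let the
 * same code serve chips reporting those command-set IDs.
 */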
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
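
/*
 * Worked example for the EraseRegionInfo decoding above: each 32-bit
 * entry packs the block size in units of 256 bytes in the high 16 bits
 * and the block count minus one in the low 16 bits.  For a region of
 * 64 blocks of 128 KiB on a non-interleaved map the entry is
 * 0x0200003f: (0x0200003f >> 8) & ~0xff = 0x20000 (131072 bytes), and
 * (0x0200003f & 0xffff) + 1 = 64 blocks.  This is also why the
 * M28W320CB fixup above forces the low word to 0x3e, i.e. 63 blocks.
 */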

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
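
/*
 * Worked example for the partition arithmetic above: a 64 Mbit chip
 * (chipshift = 23, i.e. 8 MiB) advertising 4 hardware partitions gives
 * partshift = 23 - __ffs(4) = 21, so each virtual chip covers 2 MiB,
 * and a two-chip map ends up with numvirtchips = 2 * 4 = 8 flchip
 * structures, all sharing one flchip_shared per physical chip.
 */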

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
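
/*
 * Locking contract (summarizing the code above): get_chip() must be
 * entered with chip->mutex held and, on success, returns with it still
 * held and the chip parked in a state where the caller may issue its
 * operation.  It may drop and retake the mutex internally while
 * polling, suspending a running erase, or sleeping on chip->wq, so
 * callers must not rely on chip state cached from before the call.
 */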

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
	UDELAY(map, chip, cmd_adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
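
/*
 * Usage note: point() hands back a pointer directly into the mapped
 * flash window (map->virt + from) so callers can read in place without
 * a copy; each chip touched has its ref_point_counter bumped and is
 * held in FL_POINT until a matching unpoint() drops the counter back
 * to zero.  fixup_use_point() above only installs these hooks when the
 * map is linear, since a non-linear map has no single virtual address
 * range to point into.
 */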

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip, adr,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
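
/*
 * Status decoding note for the error check above: CMD(0x1a) masks
 * status register bits 1, 3 and 4 (0x02 | 0x08 | 0x10).  Bit 1 means
 * the block was locked (-EROFS), bit 3 flags a VPP supply problem
 * (-EIO), and any remaining set bit, such as bit 4's program failure,
 * is reported generically (-EINVAL).  MERGESTATUS() folds the per-chip
 * status of an interleaved map_word down to a single value first.
 */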


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
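
/*
 * Alignment example for the three phases above: on a bankwidth-2 map,
 * writing 6 bytes at offset 3 first programs a partial word at bus
 * offset 2 (gap = 1, n = 1, the untouched byte padded with 0xff so it
 * stays unprogrammed), then two full bus words at offsets 4 and 6,
 * then a final partial word for the leftover byte at offset 8.
 */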


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
