/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 * *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
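	/* (the low 16 bits of an EraseRegionInfo word hold the block
	   count minus one, so forcing them to 0x3e makes region 1
	   report 63 blocks; see the decode in cfi_intelext_setup) */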
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are
	 * common as well.  This table picks out the cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

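	/* The extended query table has a variable-length tail.  Read the
	   fixed part first; whenever a field parsed below turns out not
	   to fit in what we fetched, grow extp_size and re-read the
	   whole table (the need_more/again dance below). */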
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_intelext_erase_varsize;
	mtd->read = cfi_intelext_read;
	mtd->write = cfi_intelext_write_words;
	mtd->sync = cfi_intelext_sync;
	mtd->lock = cfi_intelext_lock;
	mtd->unlock = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

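	/* The CFI query encodes typical operation times as log2 values
	   (per the CFI spec): 1<<N microseconds for word and buffer
	   writes, 1<<N milliseconds for a block erase. */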
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

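	/* Per the CFI spec, each 32-bit EraseRegionInfo word encodes one
	   region's per-chip geometry: bits 31-16 give the block size in
	   256-byte units, bits 15-0 the number of blocks minus one.  E.g.
	   an (illustrative) value of 0x007f0040 means 128 blocks of 16KiB
	   each, scaled below by the interleave factor. */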
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
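		/* chipshift is log2 of the chip size, so each of the
		   numparts identical partitions spans 1 << partshift
		   bytes (this assumes numparts is a power of two). */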

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 * *********** CHIP ACCESS FUNCTIONS ***********
 */

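/*
 * Quick reference for the raw Intel/Sharp command-set opcodes issued
 * below (per the Intel command-set documentation; they are not defined
 * symbolically in this file): 0xff read array, 0x70 read status
 * register, 0x50 clear status register, 0x40/0x41 word program,
 * 0xe8/0xe9 buffered write, 0xc0 OTP program, 0xb0 program/erase
 * suspend, 0xd0 confirm/resume.
 */
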
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have the possibility of contention on the
		 * write/erase operations which are global to the real
		 * chip and not per partition.  So let's fight it over in
		 * the partition which currently has authority on the
		 * operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
1010#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1011
1012/*
1013 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1014 * the flash is actively programming or erasing since we have to poll for
1015 * the operation to complete anyway. We can't do that in a generic way with
6da70124
NP
1016 * a XIP setup so do it before the actual flash operation in this case
1017 * and stub it out from INVALIDATE_CACHE_UDELAY.
1da177e4 1018 */
6da70124
NP
1019#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1020 INVALIDATE_CACHED_RANGE(map, from, size)
1021
d86d4370
AK
1022#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
1023 UDELAY(map, chip, cmd_adr, usec)
1da177e4
LT
1024
/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

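/* Without XIP we simply drop the chip lock around the busy-wait so that
   other users (e.g. other partitions of the same chip) can make
   progress in the meantime. */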
#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

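		/* does the request spill past the end of this chip? */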
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip, adr,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
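	/* Adapt the recorded typical write time: speed it up if the
	   write completed without any polling, back it off if we had
	   to poll more than once. */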
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
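	/* (the 0x1a mask covers SR.4 program error, SR.3 low VPP and
	   SR.1 locked block, per the Intel status register layout) */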
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
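	/* MaxBufWriteSize is the log2 of the per-chip write buffer size
	   in bytes (a CFI query field), so e.g. two interleaved chips
	   with 32-byte buffers give a 64-byte buffered-write window. */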
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
