/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.184 2005/10/25 20:28:40 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
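	/* The low 16 bits of an EraseRegionInfo word encode (number of
	   blocks - 1), so 0x3e makes this region report 63 blocks --
	   presumably correcting the count this chip advertises. */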
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely the device ids are as
	 * well.  This table picks up all the cases where
	 * we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
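
/*
 * For reference: cfi_fixup() walks one of the tables above until the
 * all-NULL sentinel entry, invoking every fixup whose manufacturer and
 * device id match the probed chip (CFI_MFR_ANY and CFI_ID_ANY act as
 * wildcards), passing the table's param as its second argument.
 */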

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

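	/*
	 * The extended query table carries a variable-size tail (OTP,
	 * burst read and partition records), so we may have to re-read
	 * it with a larger size once we have parsed enough of it to
	 * know how big it really is -- hence the 'again' loop below.
	 */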
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

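	/* Per the CFI spec the typical timeouts below are log2-encoded
	   (2^n, in microseconds for word/buffer writes and milliseconds
	   for block erase); shifting converts them to linear values. */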
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

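	/* Each CFI EraseRegionInfo word encodes the region geometry:
	   bits 0-15 hold (number of blocks - 1) and bits 16-31 hold the
	   block size in units of 256 bytes, which is what the shift and
	   mask arithmetic below unpacks. */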
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
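		/*
		 * chipshift is log2(chip size in bytes); since numparts
		 * is expected to be a power of two, subtracting
		 * __ffs(numparts) yields log2(bytes per hw partition).
		 */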
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

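/*
 * Quick reference for the standard Intel/Sharp command codes issued
 * below: 0xff = read array, 0x70 = read status, 0x50 = clear status,
 * 0xb0 = program/erase suspend, 0xd0 = resume or confirm, 0x40/0x41 =
 * word program, 0xc0 = OTP program, 0xe8/0xe9 = write to buffer.
 * Status bit 0x80 is the "WSM ready" bit the code polls for.
 */
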
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
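			/*
			 * Note the trylock: the normal locking order is
			 * chip->mutex then shared->lock, but here we
			 * already hold shared->lock and want the
			 * contender's mutex.  Blocking for it could
			 * deadlock, so on failure we drop everything
			 * and start over.
			 */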
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

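/*
 * point/unpoint let MTD users read straight from the memory-mapped
 * flash array with no copy.  do_point_onechip() puts a chip into
 * FL_POINT and bumps ref_point_counter; the chip stays unavailable
 * for program/erase until the matching unpoint drops the count to
 * zero again.
 */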
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
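	/* Status register layout: SR.1 (0x02) = block locked, SR.3
	   (0x08) = VPP low, SR.4 (0x10) = program failure; 0x1a masks
	   exactly the error bits that matter for a word write. */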
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

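	/* The write is carved into three phases: an unaligned head, full
	   bus-width words, and an unaligned tail.  Partial words are
	   padded with 0xff via map_word_ff(): programming can only clear
	   bits, so 0xff bytes leave neighbouring data untouched. */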
	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
