/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.182 2005/08/06 04:40:41 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.
	 * This table picks up all cases where we know that to be
	 * the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
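
/*
 * Note: cfi_fixup() walks one of the tables above and, for each entry
 * whose manufacturer/device ID matches the probed chip (CFI_MFR_ANY
 * and CFI_ID_ANY act as wildcards), calls the fixup hook with the
 * entry's param pointer.  The { 0, 0, NULL, NULL } sentinel terminates
 * the walk, so new entries must go above it.
 */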

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

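	/*
	 * For extended query 1.3+, the variable-size tail in extp->extra[]
	 * is laid out roughly as walked below: the remaining OTP protection
	 * field descriptors, the burst read info (6 bytes before 1.4, 5
	 * from 1.4 on), a one-byte count of hardware partitions, a 2-byte
	 * partition region size field (1.4 only), one regioninfo record
	 * (with trailing block info array) per partition region, and the
	 * programming region info (1.4 only).
	 */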
	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

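	/*
	 * CFI encodes typical operation times as powers of two: 2^n
	 * microseconds for word/buffer writes and 2^n milliseconds
	 * for block erase, hence the shifts below.
	 */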
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

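	/*
	 * Each CFI erase region descriptor packs two fields: bits 0-15
	 * hold (number of erase blocks - 1) and bits 16-31 hold the
	 * block size in units of 256 bytes; the >> 8 below converts
	 * the latter straight to bytes.
	 */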
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
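		/*
		 * Illustrative numbers: four partitions on a chip with
		 * chipshift 23 (an 8MiB device) give partshift 21,
		 * i.e. 2MiB virtual chips.
		 */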
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no care
 * for the presence of add_wait_queue() or schedule() calls from within a
 * couple xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling are always happening within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
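	/* For example (illustrative numbers only): with 8MiB chips,
	   i.e. chipshift 23, from = 0x900000 yields chipnum 1 and
	   ofs 0x100000. */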

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
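	/* 0x40 is the standard word-program command; parts reporting
	   primary ID 0x0200 (Sibley-style chips) take the 0x41 variant,
	   and 0xc0 programs the OTP protection register. */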
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
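	/* Adapt the stored typical word write time: if the write
	   completed without a single polling loop, shorten the next
	   wait; if it took more than one loop, lengthen it. */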
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
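	/* 0x1a gathers the status register error bits tested below:
	   SR.1 (0x02) block locked, SR.3 (0x08) VPP low, SR.4 (0x10)
	   program failure (per the Intel status register layout). */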
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
