/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.184 2005/10/25 20:28:40 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

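	/*
	 * The extended query table is variable-length: read a minimal
	 * table first, walk its variable-size parts (OTP fields, burst
	 * read info, partition regions) to find out how big the whole
	 * table really is, and re-read it with the larger size if the
	 * first pass came up short -- see the need_more/again labels.
	 */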
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
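/* Usage sketch (illustrative only, not part of this file): a map driver
 * normally reaches this entry point indirectly through the CFI probe:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *
 * where my_map is a hypothetical struct map_info the map driver has
 * already filled in; the probe dispatches to cfi_cmdset_0001() when the
 * chip reports primary vendor command set 0x0001.
 */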
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_intelext_erase_varsize;
	mtd->read = cfi_intelext_read;
	mtd->write = cfi_intelext_write_words;
	mtd->sync = cfi_intelext_sync;
	mtd->lock = cfi_intelext_lock;
	mtd->unlock = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
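		/*
		 * Note: this assumes the number of hardware partitions is
		 * a power of two, so that __ffs(numparts) == log2(numparts)
		 * and each partition spans (chip size / numparts) bytes;
		 * the sanity check below rejects nonsensical results.
		 */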
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
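/*
 * For reference, the Intel command-set opcodes used below (as given in
 * the Intel StrataFlash datasheets; the 0x41/0xe9 variants are selected
 * at runtime for command set 0x0200):
 *
 *	0xff  Read Array              0x70  Read Status Register
 *	0x50  Clear Status Register   0x90  Read Identifier/Query
 *	0x40/0x41  Word Program       0xe8/0xe9  Write to Buffer
 *	0x20  Block Erase (setup)     0xd0  Confirm / Resume
 *	0xb0  Program/Erase Suspend   0xc0  Protection (OTP) Program
 *
 * Status register bits: SR.7 (0x80) ready, SR.6 (0x40) erase suspended,
 * SR.5 (0x20) erase error, SR.4 (0x10) program error, SR.3 (0x08) VPP
 * low, SR.2 (0x04) program suspended, SR.1 (0x02) block locked,
 * SR.0 (0x01) partition/block write status.
 */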

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within a XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no
 * need to worry about the add_wait_queue() or schedule() calls appearing
 * in a couple of xip_disable()'d areas of code, like in do_erase_oneblock
 * for example.  The queueing and scheduling always happen within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
 * flash in array mode, therefore never executing many cases therein and
 * not causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

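		/* Does the remaining request run past the end of this
		   chip?  If so, clamp this transfer at the chip boundary. */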
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
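	/* Adapt the polling delay: if the chip was already done on the
	   first poll we waited too long, so shorten the per-write delay;
	   if we had to poll more than once, lengthen it again. */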
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
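	/* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */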
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first partial-word write */
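	/* Worked example (sketch): with a 4-byte bankwidth, writing 5
	   bytes at ofs 3 loads 1 byte at gap 3 into an all-0xff word (the
	   0xff padding leaves the untouched bytes unprogrammed) and writes
	   it at bus_ofs; the remaining 4 bytes then take the aligned loop
	   below. */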
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, write_cmd, cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       map->name, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Figure out the number of words to write */
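	/* E.g. with a 4-byte bankwidth and len = 9 at an adr ending in
	   ...2: the initial word_gap is 2 (bytes up to the next word
	   boundary), so words = (9 - 2 + 3) / 4 = 2, which is the chip's
	   N-1 encoding for the 3 bus words actually transferred; adr is
	   then rounded down and the leading gap padded with 0xff. */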
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time = 1;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*1000/2);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
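	/* 0x3a = SR.5 (erase error) | SR.4 (program error) | SR.3 (VPP
	   low) | SR.1 (block locked); SR.5 and SR.4 set together indicate
	   a bad command sequence, as decoded below. */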
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			timeo = jiffies + HZ;
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

1838 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1839 {
1840 unsigned long ofs, len;
1841 int ret;
1842
1843 ofs = instr->addr;
1844 len = instr->len;
1845
1846 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1847 if (ret)
1848 return ret;
1849
1850 instr->state = MTD_ERASE_DONE;
1851 mtd_erase_callback(instr);
1852
1853 return 0;
1854 }
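/*
 * Illustrative caller sketch (assumes an already-probed mtd and a
 * hypothetical my_done callback; not part of this driver):
 *
 *   struct erase_info ei = { .mtd = mtd, .addr = 0,
 *                            .len = mtd->erasesize };
 *   ei.callback = my_done;           // invoked via mtd_erase_callback()
 *   err = mtd->erase(mtd, &ei);
 */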
1855
1856 static void cfi_intelext_sync (struct mtd_info *mtd)
1857 {
1858 struct map_info *map = mtd->priv;
1859 struct cfi_private *cfi = map->fldrv_priv;
1860 int i;
1861 struct flchip *chip;
1862 int ret = 0;
1863
1864 for (i=0; !ret && i<cfi->numchips; i++) {
1865 chip = &cfi->chips[i];
1866
1867 spin_lock(chip->mutex);
1868 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1869
1870 if (!ret) {
1871 chip->oldstate = chip->state;
1872 chip->state = FL_SYNCING;
1873 /* No need to wake_up() on this state change -
1874 * as the whole point is that nobody can do anything
1875 * with the chip now anyway.
1876 */
1877 }
1878 spin_unlock(chip->mutex);
1879 }
1880
1881 /* Unlock the chips again */
1882
1883 for (i--; i >= 0; i--) {
1884 chip = &cfi->chips[i];
1885
1886 spin_lock(chip->mutex);
1887
1888 if (chip->state == FL_SYNCING) {
1889 chip->state = chip->oldstate;
1890 chip->oldstate = FL_READY;
1891 wake_up(&chip->wq);
1892 }
1893 spin_unlock(chip->mutex);
1894 }
1895 }
1896
1897 #ifdef DEBUG_LOCK_BITS
1898 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1899 struct flchip *chip,
1900 unsigned long adr,
1901 int len, void *thunk)
1902 {
1903 struct cfi_private *cfi = map->fldrv_priv;
1904 int status, ofs_factor = cfi->interleave * cfi->device_type;
1905
1906 adr += chip->start;
1907 xip_disable(map, chip, adr+(2*ofs_factor));
1908 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1909 chip->state = FL_JEDEC_QUERY;
1910 status = cfi_read_query(map, adr+(2*ofs_factor));
1911 xip_enable(map, chip, 0);
1912 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1913 adr, status);
1914 return 0;
1915 }
1916 #endif
1917
1918 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1919 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
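/*
 * cfi_varsize_frob() walks the affected erase blocks and passes its
 * "thunk" argument straight through to the per-block callback; these
 * two sentinel pointers let one callback serve both operations.
 */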
1920
1921 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1922 unsigned long adr, int len, void *thunk)
1923 {
1924 struct cfi_private *cfi = map->fldrv_priv;
1925 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1926 map_word status, status_OK;
1927 unsigned long timeo = jiffies + HZ;
1928 int ret;
1929
1930 adr += chip->start;
1931
1932 /* Let's determine this according to the interleave only once */
1933 status_OK = CMD(0x80);
1934
1935 spin_lock(chip->mutex);
1936 ret = get_chip(map, chip, adr, FL_LOCKING);
1937 if (ret) {
1938 spin_unlock(chip->mutex);
1939 return ret;
1940 }
1941
1942 ENABLE_VPP(map);
1943 xip_disable(map, chip, adr);
1944
1945 map_write(map, CMD(0x60), adr);
1946 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1947 map_write(map, CMD(0x01), adr);
1948 chip->state = FL_LOCKING;
1949 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1950 map_write(map, CMD(0xD0), adr);
1951 chip->state = FL_UNLOCKING;
1952 } else
1953 BUG();
1954
1955 /*
1956 * If Instant Individual Block Locking is supported, there is no
1957 * need to delay.
1958 */
1959
1960 if (!extp || !(extp->FeatureSupport & (1 << 5)))
1961 UDELAY(map, chip, adr, 1000000/HZ);
1962
1963 /* FIXME. Use a timer to check this, and return immediately. */
1964 /* Once the state machine's known to be working I'll do that */
1965
1966 timeo = jiffies + (HZ*20);
1967 for (;;) {
1968
1969 status = map_read(map, adr);
1970 if (map_word_andequal(map, status, status_OK, status_OK))
1971 break;
1972
1973 /* OK Still waiting */
1974 if (time_after(jiffies, timeo)) {
1975 map_write(map, CMD(0x70), adr);
1976 chip->state = FL_STATUS;
1977 xip_enable(map, chip, adr);
1978 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1979 put_chip(map, chip, adr);
1980 spin_unlock(chip->mutex);
1981 return -EIO;
1982 }
1983
1984 /* Latency issues. Drop the lock, wait a while and retry */
1985 UDELAY(map, chip, adr, 1);
1986 }
1987
1988 /* Done and happy. */
1989 chip->state = FL_STATUS;
1990 xip_enable(map, chip, adr);
1991 put_chip(map, chip, adr);
1992 spin_unlock(chip->mutex);
1993 return 0;
1994 }
1995
1996 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1997 {
1998 int ret;
1999
2000 #ifdef DEBUG_LOCK_BITS
2001 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2002 __FUNCTION__, ofs, len);
2003 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2004 ofs, len, 0);
2005 #endif
2006
2007 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2008 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2009
2010 #ifdef DEBUG_LOCK_BITS
2011 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2012 __FUNCTION__, ret);
2013 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2014 ofs, len, 0);
2015 #endif
2016
2017 return ret;
2018 }
2019
2020 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2021 {
2022 int ret;
2023
2024 #ifdef DEBUG_LOCK_BITS
2025 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2026 __FUNCTION__, ofs, len);
2027 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2028 ofs, len, 0);
2029 #endif
2030
2031 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2032 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2033
2034 #ifdef DEBUG_LOCK_BITS
2035 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2036 __FUNCTION__, ret);
2037 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2038 ofs, len, 0);
2039 #endif
2040
2041 return ret;
2042 }
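/*
 * Illustrative user-space path (hypothetical fd; MEMLOCK/MEMUNLOCK are
 * the standard MTD character-device ioctls that reach the two
 * functions above):
 *
 *   struct erase_info_user ei = { .start = 0, .length = info.erasesize };
 *   if (ioctl(fd, MEMUNLOCK, &ei) < 0)
 *       perror("MEMUNLOCK");
 */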
2043
2044 #ifdef CONFIG_MTD_OTP
2045
2046 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2047 u_long data_offset, u_char *buf, u_int size,
2048 u_long prot_offset, u_int groupno, u_int groupsize);
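/*
 * All three OTP operations share this signature: data_offset/buf/size
 * describe the register bytes to transfer, while prot_offset, groupno
 * and groupsize identify the protection word and lock bit of the group
 * being touched (only do_otp_lock actually needs the latter three).
 */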
2049
2050 static int __xipram
2051 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2052 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2053 {
2054 struct cfi_private *cfi = map->fldrv_priv;
2055 int ret;
2056
2057 spin_lock(chip->mutex);
2058 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2059 if (ret) {
2060 spin_unlock(chip->mutex);
2061 return ret;
2062 }
2063
2064 /* let's ensure we're not reading back cached data from array mode */
2065 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2066
2067 xip_disable(map, chip, chip->start);
2068 if (chip->state != FL_JEDEC_QUERY) {
2069 map_write(map, CMD(0x90), chip->start);
2070 chip->state = FL_JEDEC_QUERY;
2071 }
2072 map_copy_from(map, buf, chip->start + offset, size);
2073 xip_enable(map, chip, chip->start);
2074
2075 /* then ensure we don't keep OTP data in the cache */
2076 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2077
2078 put_chip(map, chip, chip->start);
2079 spin_unlock(chip->mutex);
2080 return 0;
2081 }
2082
2083 static int
2084 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2085 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2086 {
2087 int ret;
2088
2089 while (size) {
2090 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2091 int gap = offset - bus_ofs;
2092 int n = min_t(int, size, map_bankwidth(map)-gap);
2093 map_word datum = map_word_ff(map);
2094
2095 datum = map_word_load_partial(map, datum, buf, gap, n);
2096 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2097 if (ret)
2098 return ret;
2099
2100 offset += n;
2101 buf += n;
2102 size -= n;
2103 }
2104
2105 return 0;
2106 }
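/*
 * Worked example for the alignment logic above (illustrative,
 * bankwidth 4): offset == 5 gives bus_ofs == 4, gap == 1 and
 * n == min(size, 3). The word written at bus_ofs carries 0xff in the
 * untouched byte lanes; since programming can only clear bits, those
 * lanes are left unchanged.
 */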
2107
2108 static int
2109 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2110 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2111 {
2112 struct cfi_private *cfi = map->fldrv_priv;
2113 map_word datum;
2114
2115 /* make sure area matches group boundaries */
2116 if (size != grpsz)
2117 return -EXDEV;
2118
2119 datum = map_word_ff(map);
2120 datum = map_word_clr(map, datum, CMD(1 << grpno));
2121 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2122 }
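/*
 * The protection word is written as all-ones with only bit <grpno>
 * cleared: programming can only clear bits, so this sets the one
 * group's lock bit and leaves every other group's bit alone.
 */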
2123
2124 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2125 size_t *retlen, u_char *buf,
2126 otp_op_t action, int user_regs)
2127 {
2128 struct map_info *map = mtd->priv;
2129 struct cfi_private *cfi = map->fldrv_priv;
2130 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2131 struct flchip *chip;
2132 struct cfi_intelext_otpinfo *otp;
2133 u_long devsize, reg_prot_offset, data_offset;
2134 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2135 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2136 int ret;
2137
2138 *retlen = 0;
2139
2140 /* Check that we actually have some OTP registers */
2141 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2142 return -ENODATA;
2143
2144 /* we need real chips here, not virtual ones */
2145 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2146 chip_step = devsize >> cfi->chipshift;
2147 chip_num = 0;
2148
2149 /* Some chips have OTP located in the _top_ partition only.
2150 For example: Intel 28F256L18T (T means top-parameter device) */
2151 if (cfi->mfr == MANUFACTURER_INTEL) {
2152 switch (cfi->id) {
2153 case 0x880b:
2154 case 0x880c:
2155 case 0x880d:
2156 chip_num = chip_step - 1;
2157 }
2158 }
2159
2160 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2161 chip = &cfi->chips[chip_num];
2162 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2163
2164 /* first OTP region */
2165 field = 0;
2166 reg_prot_offset = extp->ProtRegAddr;
2167 reg_fact_groups = 1;
2168 reg_fact_size = 1 << extp->FactProtRegSize;
2169 reg_user_groups = 1;
2170 reg_user_size = 1 << extp->UserProtRegSize;
2171
2172 while (len > 0) {
2173 /* flash geometry fixup */
2174 data_offset = reg_prot_offset + 1;
2175 data_offset *= cfi->interleave * cfi->device_type;
2176 reg_prot_offset *= cfi->interleave * cfi->device_type;
2177 reg_fact_size *= cfi->interleave;
2178 reg_user_size *= cfi->interleave;
2179
2180 if (user_regs) {
2181 groups = reg_user_groups;
2182 groupsize = reg_user_size;
2183 /* skip over factory reg area */
2184 groupno = reg_fact_groups;
2185 data_offset += reg_fact_groups * reg_fact_size;
2186 } else {
2187 groups = reg_fact_groups;
2188 groupsize = reg_fact_size;
2189 groupno = 0;
2190 }
2191
2192 while (len > 0 && groups > 0) {
2193 if (!action) {
2194 /*
2195 * Special case: if action is NULL
2196 * we fill buf with otp_info records.
2197 */
2198 struct otp_info *otpinfo;
2199 map_word lockword;
2200 if (len < sizeof(struct otp_info)) /* unsigned: check before subtracting */
2201 return -ENOSPC;
2202 len -= sizeof(struct otp_info);
2203 ret = do_otp_read(map, chip,
2204 reg_prot_offset,
2205 (u_char *)&lockword,
2206 map_bankwidth(map),
2207 0, 0, 0);
2208 if (ret)
2209 return ret;
2210 otpinfo = (struct otp_info *)buf;
2211 otpinfo->start = from;
2212 otpinfo->length = groupsize;
2213 otpinfo->locked =
2214 !map_word_bitsset(map, lockword,
2215 CMD(1 << groupno));
2216 from += groupsize;
2217 buf += sizeof(*otpinfo);
2218 *retlen += sizeof(*otpinfo);
2219 } else if (from >= groupsize) {
2220 from -= groupsize;
2221 data_offset += groupsize;
2222 } else {
2223 int size = groupsize;
2224 data_offset += from;
2225 size -= from;
2226 from = 0;
2227 if (size > len)
2228 size = len;
2229 ret = action(map, chip, data_offset,
2230 buf, size, reg_prot_offset,
2231 groupno, groupsize);
2232 if (ret < 0)
2233 return ret;
2234 buf += size;
2235 len -= size;
2236 *retlen += size;
2237 data_offset += size;
2238 }
2239 groupno++;
2240 groups--;
2241 }
2242
2243 /* next OTP region */
2244 if (++field == extp->NumProtectionFields)
2245 break;
2246 reg_prot_offset = otp->ProtRegAddr;
2247 reg_fact_groups = otp->FactGroups;
2248 reg_fact_size = 1 << otp->FactProtRegSize;
2249 reg_user_groups = otp->UserGroups;
2250 reg_user_size = 1 << otp->UserProtRegSize;
2251 otp++;
2252 }
2253 }
2254
2255 return 0;
2256 }
2257
2258 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2259 size_t len, size_t *retlen,
2260 u_char *buf)
2261 {
2262 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2263 buf, do_otp_read, 0);
2264 }
2265
2266 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2267 size_t len, size_t *retlen,
2268 u_char *buf)
2269 {
2270 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2271 buf, do_otp_read, 1);
2272 }
2273
2274 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2275 size_t len, size_t *retlen,
2276 u_char *buf)
2277 {
2278 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2279 buf, do_otp_write, 1);
2280 }
2281
2282 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2283 loff_t from, size_t len)
2284 {
2285 size_t retlen;
2286 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2287 NULL, do_otp_lock, 1);
2288 }
2289
2290 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2291 struct otp_info *buf, size_t len)
2292 {
2293 size_t retlen;
2294 int ret;
2295
2296 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
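/* GNU "?:" shorthand: return ret if nonzero, else the record byte count */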
2297 return ret ? : retlen;
2298 }
2299
2300 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2301 struct otp_info *buf, size_t len)
2302 {
2303 size_t retlen;
2304 int ret;
2305
2306 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2307 return ret ? : retlen;
2308 }
2309
2310 #endif
2311
2312 static int cfi_intelext_suspend(struct mtd_info *mtd)
2313 {
2314 struct map_info *map = mtd->priv;
2315 struct cfi_private *cfi = map->fldrv_priv;
2316 int i;
2317 struct flchip *chip;
2318 int ret = 0;
2319
2320 for (i=0; !ret && i<cfi->numchips; i++) {
2321 chip = &cfi->chips[i];
2322
2323 spin_lock(chip->mutex);
2324
2325 switch (chip->state) {
2326 case FL_READY:
2327 case FL_STATUS:
2328 case FL_CFI_QUERY:
2329 case FL_JEDEC_QUERY:
2330 if (chip->oldstate == FL_READY) {
2331 chip->oldstate = chip->state;
2332 chip->state = FL_PM_SUSPENDED;
2333 /* No need to wake_up() on this state change -
2334 * as the whole point is that nobody can do anything
2335 * with the chip now anyway.
2336 */
2337 } else {
2338 /* There seems to be an operation pending. We must wait for it. */
2339 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2340 ret = -EAGAIN;
2341 }
2342 break;
2343 default:
2344 /* Should we actually wait? Once upon a time these routines weren't
2345 allowed to. Or should we return -EAGAIN, because the upper layers
2346 ought to have already shut down anything which was using the device
2347 anyway? The latter for now. */
2348 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2349 ret = -EAGAIN;
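/* deliberate fall-through */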
2350 case FL_PM_SUSPENDED:
2351 break;
2352 }
2353 spin_unlock(chip->mutex);
2354 }
2355
2356 /* Unlock the chips again */
2357
2358 if (ret) {
2359 for (i--; i >= 0; i--) {
2360 chip = &cfi->chips[i];
2361
2362 spin_lock(chip->mutex);
2363
2364 if (chip->state == FL_PM_SUSPENDED) {
2365 /* No need to force it into a known state here,
2366 because we're returning failure, and it didn't
2367 get power cycled */
2368 chip->state = chip->oldstate;
2369 chip->oldstate = FL_READY;
2370 wake_up(&chip->wq);
2371 }
2372 spin_unlock(chip->mutex);
2373 }
2374 }
2375
2376 return ret;
2377 }
2378
2379 static void cfi_intelext_resume(struct mtd_info *mtd)
2380 {
2381 struct map_info *map = mtd->priv;
2382 struct cfi_private *cfi = map->fldrv_priv;
2383 int i;
2384 struct flchip *chip;
2385
2386 for (i=0; i<cfi->numchips; i++) {
2387
2388 chip = &cfi->chips[i];
2389
2390 spin_lock(chip->mutex);
2391
2392 /* Go to known state. Chip may have been power cycled */
2393 if (chip->state == FL_PM_SUSPENDED) {
2394 map_write(map, CMD(0xFF), cfi->chips[i].start);
2395 chip->oldstate = chip->state = FL_READY;
2396 wake_up(&chip->wq);
2397 }
2398
2399 spin_unlock(chip->mutex);
2400 }
2401 }
2402
2403 static int cfi_intelext_reset(struct mtd_info *mtd)
2404 {
2405 struct map_info *map = mtd->priv;
2406 struct cfi_private *cfi = map->fldrv_priv;
2407 int i, ret;
2408
2409 for (i=0; i < cfi->numchips; i++) {
2410 struct flchip *chip = &cfi->chips[i];
2411
2412 /* force the completion of any ongoing operation
2413 and switch to array mode so any bootloader in
2414 flash is accessible for soft reboot. */
2415 spin_lock(chip->mutex);
2416 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2417 if (!ret) {
2418 map_write(map, CMD(0xff), chip->start);
2419 chip->state = FL_READY;
2420 }
2421 spin_unlock(chip->mutex);
2422 }
2423
2424 return 0;
2425 }
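/*
 * Note: a per-chip failure from get_chip() is deliberately ignored
 * here; at reboot/shutdown this is strictly best-effort, so the
 * function always reports success.
 */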
2426
2427 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2428 void *v)
2429 {
2430 struct mtd_info *mtd;
2431
2432 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2433 cfi_intelext_reset(mtd);
2434 return NOTIFY_DONE;
2435 }
2436
2437 static void cfi_intelext_destroy(struct mtd_info *mtd)
2438 {
2439 struct map_info *map = mtd->priv;
2440 struct cfi_private *cfi = map->fldrv_priv;
2441 cfi_intelext_reset(mtd);
2442 unregister_reboot_notifier(&mtd->reboot_notifier);
2443 kfree(cfi->cmdset_priv);
2444 kfree(cfi->cfiq);
2445 kfree(cfi->chips[0].priv);
2446 kfree(cfi);
2447 kfree(mtd->eraseregions);
2448 }
2449
2450 static char im_name_0001[] = "cfi_cmdset_0001";
2451 static char im_name_0003[] = "cfi_cmdset_0003";
2452 static char im_name_0200[] = "cfi_cmdset_0200";
2453
2454 static int __init cfi_intelext_init(void)
2455 {
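/*
 * Command sets 0x0003 (Intel/Sharp Standard) and 0x0200 (Intel
 * Performance Code) are close enough to the Extended set that the
 * same implementation serves all three, hence three names registered
 * for the one entry point.
 */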
2456 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2457 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2458 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2459 return 0;
2460 }
2461
2462 static void __exit cfi_intelext_exit(void)
2463 {
2464 inter_module_unregister(im_name_0001);
2465 inter_module_unregister(im_name_0003);
2466 inter_module_unregister(im_name_0200);
2467 }
2468
2469 module_init(cfi_intelext_init);
2470 module_exit(cfi_intelext_exit);
2471
2472 MODULE_LICENSE("GPL");
2473 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2474 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");