/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

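/*
 * JEDEC device IDs of a few parts that need special handling; these are
 * matched against cfi->id by the fixup tables further down.
 */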
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
        .name           = "cfi_cmdset_0002",
        .module         = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk("  Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk("  Block protection: Not supported\n");
        else
                printk("  Block protection: %d sectors per group\n", extp->BlkProt);


        printk("  Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk("  Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk("  Page mode: Not supported\n");
        else
                printk("  Page mode: %d word page\n", extp->PageMode << 2);

        printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                        map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        DEBUG(MTD_DEBUG_LEVEL1,
                                "%s: Macronix MX29LV400C with bottom boot block"
                                " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                DEBUG(MTD_DEBUG_LEVEL1,
                        "%s: AMD CFI PRI V%c.%c has no boot block field;"
                        " deduced %s from Device ID\n", map->name, major, minor,
                        extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
                mtd->write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
        /* Setup for chips with a secsi area */
        mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
            ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
        mtd->lock = cfi_atmel_lock;
        mtd->unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
        }
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
        { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
        { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
        { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
        { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
        { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
        { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
        { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common, and it appears the device IDs are as
         * well.  This table picks all the cases where we know
         * that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
        { 0, 0, NULL, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
            extp->MajorVersion == '0')
                extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase = cfi_amdstd_erase_varsize;
        mtd->write = cfi_amdstd_write_words;
        mtd->read = cfi_amdstd_read;
        mtd->sync = cfi_amdstd_sync;
        mtd->suspend = cfi_amdstd_suspend;
        mtd->resume = cfi_amdstd_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->name = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        if (extp->MajorVersion != '1' ||
                            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                                printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c.\n", extp->MajorVersion,
                                       extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
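/* Command-set IDs 0x0006 and 0x0701 are handled identically to 0x0002. */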
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

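        /*
         * Each EraseRegionInfo word encodes the region's block count minus
         * one in its low 16 bits and the block size in units of 256 bytes
         * in its high 16 bits, hence the shift and mask below.
         */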
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }
#if 0
        // debug
        for (i=0; i<mtd->numeraseregions;i++){
                printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }
#endif

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}
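
/*
 * While a program or erase operation is in progress, AMD-style chips
 * toggle DQ6 on every read cycle, so two back-to-back reads returning
 * identical data imply the chip is back in array mode.
 */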

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return  map_word_equal(map, oldd, curd) &&
                map_word_equal(map, curd, expected);
}

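/*
 * Wait for the chip to become available for a new operation of type 'mode',
 * suspending an in-progress erase if necessary (and permitted by the chip).
 * Called with chip->mutex held; may drop and re-take it while sleeping.
 */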
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we were
                                 * trying to use the erase-in-progress sector. */
                                map_write(map, CMD(0x30), chip->in_progress_block_addr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_READY;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (!cfip || !(cfip->EraseSuspend&2)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                goto resettime;
        }
}


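/*
 * Release the chip after an operation: resume an erase that get_chip()
 * suspended, drop VPP, and wake up anyone waiting on the chip.
 * Called with chip->mutex held.
 */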
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                map_write(map, CMD(0x30), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate;

        do {
                cpu_relax();
                if (xip_irqpending() && extp &&
                    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase operation when supported.
                         * Note that we currently don't try to suspend
                         * interleaved chips if there is already another
                         * operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once). Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (!map_word_bitsset(map, status, CMD(0x40)))
                                break;
                        chip->state = FL_XIP_WHILE_ERASING;
                        chip->erase_suspended = 1;
                        map_write(map, CMD(0xf0), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back. However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state. If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != FL_XIP_WHILE_ERASING) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0x30), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why we need not worry
 * about the presence of add_wait_queue() or schedule() calls from within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling are always happening within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec) \
do { \
        mutex_unlock(&chip->mutex); \
        cfi_udelay(usec); \
        mutex_lock(&chip->mutex); \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
        mutex_unlock(&chip->mutex); \
        INVALIDATE_CACHED_RANGE(map, adr, len); \
        cfi_udelay(usec); \
        mutex_lock(&chip->mutex); \
} while (0)

#endif

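/*
 * Read 'len' bytes from one chip, first returning it to array/read mode
 * with a 0xF0 reset if it was left in another state.
 */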
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xf0), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        mutex_unlock(&chip->mutex);
        return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */

        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


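/*
 * Read from the chip's SecSi (Secured Silicon) sector: below, the
 * 0xAA/0x55/0x88 sequence enters the SecSi region and the 0xAA/0x55/0x90
 * sequence followed by 0x00 exits it again.
 */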
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long timeo = jiffies + HZ;
        struct cfi_private *cfi = map->fldrv_priv;

 retry:
        mutex_lock(&chip->mutex);

        if (chip->state != FL_READY){
#if 0
                printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);

                mutex_unlock(&chip->mutex);

                schedule();
                remove_wait_queue(&chip->wq, &wait);
#if 0
                if(signal_pending(current))
                        return -EINTR;
#endif
                timeo = jiffies + HZ;

                goto retry;
        }

        adr += chip->start;

        chip->state = FL_READY;

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        map_copy_from(map, buf, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        wake_up(&chip->wq);
        mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;


        /* ofs: offset within the first chip that the first read should start */

        /* 8 secsi bytes per chip */
        chipnum=from>>3;
        ofs=from & 7;


        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> 3)
                        thislen = (1<<3) - ofs;
                else
                        thislen = len;

                ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}


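/*
 * Program one bus-width word at 'adr': issue the three-cycle unlock plus
 * 0xA0 program command, write the datum, then poll until the chip returns
 * to array mode.  Failed programs are retried up to MAX_WORD_RETRIES times.
 */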
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /*
         * We use a 1ms + 1 jiffies generic timeout for writes (most devices
         * have a max write time of a few hundreds usec). However, we should
         * use the maximum timeout value given by the chip at probe time
         * instead.  Unfortunately, struct flchip doesn't have a field for
         * the maximum timeout, only for the typical one, which can be far
         * too short depending on the conditions.  The ' + 1' is to avoid
         * having a timeout of 0 jiffies if HZ is smaller than 1000.
         */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = 0;
        map_word oldd;
        int retry_cnt = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0] );

        /*
         * Check for a NOP for the case when the datum to write is already
         * present - it saves time and works around buggy chips that corrupt
         * data at other locations when 0xff is written to a location that
         * already contains 0xff.
         */
        oldd = map_read(map, adr);
        if (map_word_equal(map, oldd, datum)) {
                DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
                       __func__);
                goto op_done;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
 retry:
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        /* See comment above for timeout value. */
        timeo = jiffies + uWriteTimeout;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
                        xip_disable(map, chip, adr);
                        break;
                }

                if (chip_ready(map, adr))
                        break;

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, datum)) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                if (++retry_cnt <= MAX_WORD_RETRIES)
                        goto retry;

                ret = -EIO;
        }
        xip_enable(map, chip, adr);
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}


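/*
 * MTD write entry point for word-at-a-time programming.  Unaligned leading
 * and trailing bytes are handled by read-modify-write of a full bus word.
 */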
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, chipstart;
        DECLARE_WAITQUEUE(wait, current);

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);
        chipstart = cfi->chips[chipnum].start;

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int i = ofs - bus_ofs;
                int n = 0;
                map_word tmp_buf;

 retry:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
#if 0
                        printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
                        if(signal_pending(current))
                                return -EINTR;
#endif
                        goto retry;
                }

                /* Load 'tmp_buf' with old contents of flash */
                tmp_buf = map_read(map, bus_ofs+chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                /* Number of bytes to copy from buffer */
                n = min_t(int, len, map_bankwidth(map)-i);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, tmp_buf);
                if (ret)
                        return ret;

                ofs += n;
                buf += n;
                (*retlen) += n;
                len -= n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* We are now aligned, write as much as possible */
        while(len >= map_bankwidth(map)) {
                map_word datum;

                datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                        chipstart = cfi->chips[chipnum].start;
                }
        }

        /* Write the trailing bytes if any */
        if (len & (map_bankwidth(map)-1)) {
                map_word tmp_buf;

 retry1:
                mutex_lock(&cfi->chips[chipnum].mutex);

                if (cfi->chips[chipnum].state != FL_READY) {
#if 0
                        printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&cfi->chips[chipnum].wq, &wait);

                        mutex_unlock(&cfi->chips[chipnum].mutex);

                        schedule();
                        remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
                        if(signal_pending(current))
                                return -EINTR;
#endif
                        goto retry1;
                }

                tmp_buf = map_read(map, ofs + chipstart);

                mutex_unlock(&cfi->chips[chipnum].mutex);

                tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, tmp_buf);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
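/*
 * Buffered programming: 0x25 selects Write Buffer Load at the sector, the
 * next cycle gives the word count minus one, the data words follow, and
 * 0x29 (Write Buffer Program Confirm) starts the actual programming.
 */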
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf,
                                    int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        /* see comments in do_write_oneword() regarding uWriteTimeout. */
        unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
        int ret = -EIO;
        unsigned long cmd_adr;
        int z, words;
        map_word datum;

        adr += chip->start;
        cmd_adr = adr;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        datum = map_word_load(map, buf);

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
               __func__, adr, datum.x[0] );

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, cmd_adr);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        /* Write Buffer Load */
        map_write(map, CMD(0x25), cmd_adr);

        chip->state = FL_WRITING_TO_BUFFER;

        /* Write length of data to come */
        words = len / map_bankwidth(map);
        map_write(map, CMD(words - 1), cmd_adr);
        /* Write data */
        z = 0;
        while(z < words * map_bankwidth(map)) {
                datum = map_word_load(map, buf);
                map_write(map, datum, adr + z);

                z += map_bankwidth(map);
                buf += map_bankwidth(map);
        }
        z -= map_bankwidth(map);

        adr += z;

        /* Write Buffer Program Confirm: GO GO GO */
        map_write(map, CMD(0x29), cmd_adr);
        chip->state = FL_WRITING;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map_bankwidth(map),
                                chip->word_write_time);

        timeo = jiffies + uWriteTimeout;

        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        mutex_lock(&chip->mutex);
                        continue;
                }

                if (time_after(jiffies, timeo) && !chip_ready(map, adr))
                        break;

                if (chip_ready(map, adr)) {
                        xip_enable(map, chip, adr);
                        goto op_done;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1);
        }

        /* reset on all failures. */
        map_write( map, CMD(0xF0), chip->start );
        xip_enable(map, chip, adr);
        /* FIXME - should have reset delay before continuing */

        printk(KERN_WARNING "MTD %s(): software timeout\n",
               __func__ );

        ret = -EIO;
 op_done:
        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}


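/*
 * MTD write entry point when buffered writes are available.  Unaligned or
 * single-word pieces fall back to cfi_amdstd_write_words(); aligned runs go
 * through do_write_buffer() in chunks that never cross a write-buffer
 * boundary.
 */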
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
                                    size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first word write */
        if (ofs & (map_bankwidth(map)-1)) {
                size_t local_len = (-ofs)&(map_bankwidth(map)-1);
                if (local_len > len)
                        local_len = len;
                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
                                             local_len, retlen, buf);
                if (ret)
                        return ret;
                ofs += local_len;
                buf += local_len;
                len -= local_len;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        /* Write buffer is worth it only if more than one word to write... */
        while (len >= map_bankwidth(map) * 2) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;
                if (size % map_bankwidth(map))
                        size -= size % map_bankwidth(map);

                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len) {
                size_t retlen_dregs = 0;

                ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
                                             len, &retlen_dregs, buf);

                *retlen += retlen_dregs;
                return ret;
        }

        return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
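/*
 * The full-chip erase below uses the six-cycle command sequence ending in
 * 0x10 at the first unlock address, rather than 0x30 at a sector address.
 */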
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        unsigned long int adr;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr = cfi->addr_unlock1;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
               __func__, chip->start );

        XIP_INVAL_CACHED_RANGE(map, adr, map->size);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map->size,
                                chip->erase_time*500);

        timeo = jiffies + (HZ*20);

        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                if (chip_ready(map, adr))
                        break;

                if (time_after(jiffies, timeo)) {
                        printk(KERN_WARNING "MTD %s(): software timeout\n",
                               __func__ );
                        break;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1000000/HZ);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, map_word_ff(map))) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                ret = -EIO;
        }

        chip->state = FL_READY;
        xip_enable(map, chip, adr);
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);

        return ret;
}


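/*
 * Sector erase: same six-cycle unlock sequence, but the final 0x30 cycle
 * goes to the address of the sector being erased.
 */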
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo = jiffies + HZ;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr += chip->start;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr, FL_ERASING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
               __func__, adr );

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
        map_write(map, CMD(0x30), adr);

        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;

        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, len,
                                chip->erase_time*500);

        timeo = jiffies + (HZ*20);

        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                if (chip_ready(map, adr)) {
                        xip_enable(map, chip, adr);
                        break;
                }

                if (time_after(jiffies, timeo)) {
                        xip_enable(map, chip, adr);
                        printk(KERN_WARNING "MTD %s(): software timeout\n",
                               __func__ );
                        break;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                UDELAY(map, chip, adr, 1000000/HZ);
        }
        /* Did we succeed? */
        if (!chip_good(map, adr, map_word_ff(map))) {
                /* reset on all failures. */
                map_write( map, CMD(0xF0), chip->start );
                /* FIXME - should have reset delay before continuing */

                ret = -EIO;
        }

        chip->state = FL_READY;
        put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
        return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;

        ofs = instr->addr;
        len = instr->len;

        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        if (instr->addr != 0)
                return -EINVAL;

        if (instr->len != mtd->size)
                return -EINVAL;

        ret = do_erase_chip(map, &cfi->chips[0]);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
                         unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
        if (ret)
                goto out_unlock;
        chip->state = FL_LOCKING;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
              __func__, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        map_write(map, CMD(0x40), chip->start + adr);

        chip->state = FL_READY;
        put_chip(map, chip, adr + chip->start);
        ret = 0;

 out_unlock:
        mutex_unlock(&chip->mutex);
        return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
                           unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
        if (ret)
                goto out_unlock;
        chip->state = FL_UNLOCKING;

        DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
              __func__, adr, len);

        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        map_write(map, CMD(0x70), adr);

        chip->state = FL_READY;
        put_chip(map, chip, adr + chip->start);
        ret = 0;

 out_unlock:
        mutex_unlock(&chip->mutex);
        return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

        retry:
                mutex_lock(&chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_SYNCING:
                        mutex_unlock(&chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);

                        mutex_unlock(&chip->mutex);

                        schedule();

                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */

        for (i--; i >=0; i--) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                mutex_unlock(&chip->mutex);
        }
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_PM_SUSPENDED;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_PM_SUSPENDED:
                        break;

                default:
                        ret = -EAGAIN;
                        break;
                }
                mutex_unlock(&chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >=0; i--) {
                        chip = &cfi->chips[i];

                        mutex_lock(&chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                chip->state = chip->oldstate;
                                wake_up(&chip->wq);
                        }
                        mutex_unlock(&chip->mutex);
                }
        }

        return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i=0; i<cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                if (chip->state == FL_PM_SUSPENDED) {
                        chip->state = FL_READY;
                        map_write(map, CMD(0xF0), chip->start);
                        wake_up(&chip->wq);
                }
                else
                        printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

                mutex_unlock(&chip->mutex);
        }
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i, ret;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {

                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
                if (!ret) {
                        map_write(map, CMD(0xF0), chip->start);
                        chip->state = FL_SHUTDOWN;
                        put_chip(map, chip, chip->start);
                }

                mutex_unlock(&chip->mutex);
        }

        return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
                             void *v)
{
        struct mtd_info *mtd;

        mtd = container_of(nb, struct mtd_info, reboot_notifier);
        cfi_amdstd_reset(mtd);
        return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi_amdstd_reset(mtd);
        unregister_reboot_notifier(&mtd->reboot_notifier);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi);
        kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");