1/*
2 * Common Flash Interface support:
3 * ST Advanced Architecture Command Set (ID 0x0020)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
 7 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
8 * - completely revamped method functions so they are aware and
9 * independent of the flash geometry (buswidth, interleave, etc.)
10 * - scalability vs code size is completely set at compile-time
11 * (see include/linux/mtd/cfi.h for selection)
12 * - optimized write buffer method
13 * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
14 * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
15 * (command set 0x0020)
16 * - added a writev function
17 * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
18 * - Plugged memory leak in cfi_staa_writev().
19 */
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <asm/io.h>
26#include <asm/byteorder.h>
27
28#include <linux/errno.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/interrupt.h>
32#include <linux/mtd/map.h>
33#include <linux/mtd/cfi.h>
34#include <linux/mtd/mtd.h>
35
36
37static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
38static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
39static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
40 unsigned long count, loff_t to, size_t *retlen);
41static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
42static void cfi_staa_sync (struct mtd_info *);
43static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
44static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
45static int cfi_staa_suspend (struct mtd_info *);
46static void cfi_staa_resume (struct mtd_info *);
47
48static void cfi_staa_destroy(struct mtd_info *);
49
50struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
51
52static struct mtd_info *cfi_staa_setup (struct map_info *);
53
54static struct mtd_chip_driver cfi_staa_chipdrv = {
55 .probe = NULL, /* Not usable directly */
56 .destroy = cfi_staa_destroy,
57 .name = "cfi_cmdset_0020",
58 .module = THIS_MODULE
59};
60
61/* #define DEBUG_LOCK_BITS */
62//#define DEBUG_CFI_FEATURES
63
64#ifdef DEBUG_CFI_FEATURES
65static void cfi_tell_features(struct cfi_pri_intelext *extp)
66{
67 int i;
68 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
69 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
70 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
71 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
72 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
73 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
74 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
75 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
76 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
77 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
78 for (i=9; i<32; i++) {
79 if (extp->FeatureSupport & (1<<i))
80 printk(" - Unknown Bit %X: supported\n", i);
81 }
82
83 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
84 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
85 for (i=1; i<8; i++) {
86 if (extp->SuspendCmdSupport & (1<<i))
87 printk(" - Unknown Bit %X: supported\n", i);
88 }
89
90 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
91 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
92 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
93 for (i=2; i<16; i++) {
94 if (extp->BlkStatusRegMask & (1<<i))
95 printk(" - Unknown Bit %X Active: yes\n",i);
96 }
97
98 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
99 extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
100 if (extp->VppOptimal)
101 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
102 extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
103}
104#endif
105
106/* This routine is made available to other mtd code via
107 * inter_module_register. It must only be accessed through
108 * inter_module_get which will bump the use count of this module. The
109 * addresses passed back in cfi are valid as long as the use count of
110 * this module is non-zero, i.e. between inter_module_get and
111 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
112 */
113struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
114{
115 struct cfi_private *cfi = map->fldrv_priv;
116 int i;
117
118 if (cfi->cfi_mode) {
119 /*
120 * It's a real CFI chip, not one for which the probe
121 * routine faked a CFI structure. So we read the feature
122 * table from it.
123 */
124 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
125 struct cfi_pri_intelext *extp;
126
127 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
128 if (!extp)
129 return NULL;
130
131 if (extp->MajorVersion != '1' ||
132 (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
133 printk(KERN_ERR " Unknown ST Microelectronics"
134 " Extended Query version %c.%c.\n",
135 extp->MajorVersion, extp->MinorVersion);
136 kfree(extp);
137 return NULL;
138 }
139
140 /* Do some byteswapping if necessary */
141 extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
142 extp->BlkStatusRegMask = cfi32_to_cpu(map,
143 extp->BlkStatusRegMask);
144
145#ifdef DEBUG_CFI_FEATURES
146 /* Tell the user about it in lots of lovely detail */
147 cfi_tell_features(extp);
148#endif
149
150 /* Install our own private info structure */
151 cfi->cmdset_priv = extp;
152 }
153
154 for (i=0; i< cfi->numchips; i++) {
155 cfi->chips[i].word_write_time = 128;
156 cfi->chips[i].buffer_write_time = 128;
157 cfi->chips[i].erase_time = 1024;
158 cfi->chips[i].ref_point_counter = 0;
159 init_waitqueue_head(&(cfi->chips[i].wq));
160 }
161
162 return cfi_staa_setup(map);
163}
164EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165
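/*
 * Build the mtd_info for the probed chips: size and erase-region geometry
 * come from the CFI query data, and the cfi_staa_* methods are installed
 * as the MTD operations.
 */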
166static struct mtd_info *cfi_staa_setup(struct map_info *map)
167{
168 struct cfi_private *cfi = map->fldrv_priv;
169 struct mtd_info *mtd;
170 unsigned long offset = 0;
171 int i,j;
172 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
173
174 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
175 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
176
177 if (!mtd) {
178 kfree(cfi->cmdset_priv);
179 return NULL;
180 }
181
182 mtd->priv = map;
183 mtd->type = MTD_NORFLASH;
184 mtd->size = devsize * cfi->numchips;
185
186 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
187 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
188 * mtd->numeraseregions, GFP_KERNEL);
189 if (!mtd->eraseregions) {
190 kfree(cfi->cmdset_priv);
191 kfree(mtd);
192 return NULL;
193 }
194
195 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
196 unsigned long ernum, ersize;
197 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
198 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
199
200 if (mtd->erasesize < ersize) {
201 mtd->erasesize = ersize;
202 }
203 for (j=0; j<cfi->numchips; j++) {
204 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
205 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
206 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
207 }
208 offset += (ersize * ernum);
209 }
210
211 if (offset != devsize) {
212 /* Argh */
213 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
214 kfree(mtd->eraseregions);
215 kfree(cfi->cmdset_priv);
216 kfree(mtd);
217 return NULL;
218 }
219
220 for (i=0; i<mtd->numeraseregions;i++){
221 printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
222 i, (unsigned long long)mtd->eraseregions[i].offset,
223 mtd->eraseregions[i].erasesize,
224 mtd->eraseregions[i].numblocks);
225 }
226
227 /* Also select the correct geometry setup */
228 mtd->_erase = cfi_staa_erase_varsize;
229 mtd->_read = cfi_staa_read;
230 mtd->_write = cfi_staa_write_buffers;
231 mtd->_writev = cfi_staa_writev;
232 mtd->_sync = cfi_staa_sync;
233 mtd->_lock = cfi_staa_lock;
234 mtd->_unlock = cfi_staa_unlock;
235 mtd->_suspend = cfi_staa_suspend;
236 mtd->_resume = cfi_staa_resume;
237 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
238 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
239 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
240 map->fldrv = &cfi_staa_chipdrv;
241 __module_get(THIS_MODULE);
242 mtd->name = map->name;
243 return mtd;
244}
245
246
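/*
 * Read 'len' bytes from one chip. If the chip is busy erasing and advertises
 * erase suspend, the erase is suspended around the read and resumed
 * afterwards; otherwise we sleep until the chip is idle.
 */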
247static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
248{
249 map_word status, status_OK;
250 unsigned long timeo;
251 DECLARE_WAITQUEUE(wait, current);
252 int suspended = 0;
253 unsigned long cmd_addr;
254 struct cfi_private *cfi = map->fldrv_priv;
255
256 adr += chip->start;
257
258 /* Ensure cmd read/writes are aligned. */
259 cmd_addr = adr & ~(map_bankwidth(map)-1);
260
261 /* Let's determine this according to the interleave only once */
262 status_OK = CMD(0x80);
263
264 timeo = jiffies + HZ;
265 retry:
266 mutex_lock(&chip->mutex);
267
268 /* Check that the chip's ready to talk to us.
269 * If it's in FL_ERASING state, suspend it and make it talk now.
270 */
271 switch (chip->state) {
272 case FL_ERASING:
273 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
274 goto sleep; /* We don't support erase suspend */
275
276 map_write (map, CMD(0xb0), cmd_addr);
277 /* If the flash has finished erasing, then 'erase suspend'
278 * appears to make some (28F320) flash devices switch to
279 * 'read' mode. Make sure that we switch to 'read status'
280 * mode so we get the right data. --rmk
281 */
282 map_write(map, CMD(0x70), cmd_addr);
283 chip->oldstate = FL_ERASING;
284 chip->state = FL_ERASE_SUSPENDING;
285 // printk("Erase suspending at 0x%lx\n", cmd_addr);
286 for (;;) {
287 status = map_read(map, cmd_addr);
288 if (map_word_andequal(map, status, status_OK, status_OK))
289 break;
290
291 if (time_after(jiffies, timeo)) {
292 /* Urgh */
293 map_write(map, CMD(0xd0), cmd_addr);
294 /* make sure we're in 'read status' mode */
295 map_write(map, CMD(0x70), cmd_addr);
296 chip->state = FL_ERASING;
297 wake_up(&chip->wq);
298 mutex_unlock(&chip->mutex);
299 printk(KERN_ERR "Chip not ready after erase "
300 "suspended: status = 0x%lx\n", status.x[0]);
301 return -EIO;
302 }
303
304 mutex_unlock(&chip->mutex);
305 cfi_udelay(1);
306 mutex_lock(&chip->mutex);
307 }
308
309 suspended = 1;
310 map_write(map, CMD(0xff), cmd_addr);
311 chip->state = FL_READY;
312 break;
313
314#if 0
315 case FL_WRITING:
316 /* Not quite yet */
317#endif
318
319 case FL_READY:
320 break;
321
322 case FL_CFI_QUERY:
323 case FL_JEDEC_QUERY:
324 map_write(map, CMD(0x70), cmd_addr);
325 chip->state = FL_STATUS;
326
327 case FL_STATUS:
328 status = map_read(map, cmd_addr);
329 if (map_word_andequal(map, status, status_OK, status_OK)) {
330 map_write(map, CMD(0xff), cmd_addr);
331 chip->state = FL_READY;
332 break;
333 }
334
335 /* Urgh. Chip not yet ready to talk to us. */
336 if (time_after(jiffies, timeo)) {
337 mutex_unlock(&chip->mutex);
338 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
339 return -EIO;
340 }
341
342 /* Latency issues. Drop the lock, wait a while and retry */
343 mutex_unlock(&chip->mutex);
344 cfi_udelay(1);
345 goto retry;
346
347 default:
348 sleep:
349 /* Stick ourselves on a wait queue to be woken when
350 someone changes the status */
351 set_current_state(TASK_UNINTERRUPTIBLE);
352 add_wait_queue(&chip->wq, &wait);
353 mutex_unlock(&chip->mutex);
354 schedule();
355 remove_wait_queue(&chip->wq, &wait);
356 timeo = jiffies + HZ;
357 goto retry;
358 }
359
360 map_copy_from(map, buf, adr, len);
361
362 if (suspended) {
363 chip->state = chip->oldstate;
364 /* What if one interleaved chip has finished and the
365 other hasn't? The old code would leave the finished
366 one in READY mode. That's bad, and caused -EROFS
367 errors to be returned from do_erase_oneblock because
368 that's the only bit it checked for at the time.
369 As the state machine appears to explicitly allow
370 sending the 0x70 (Read Status) command to an erasing
371 chip and expecting it to be ignored, that's what we
372 do. */
373 map_write(map, CMD(0xd0), cmd_addr);
374 map_write(map, CMD(0x70), cmd_addr);
375 }
376
377 wake_up(&chip->wq);
378 mutex_unlock(&chip->mutex);
379 return 0;
380}
381
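/* Split a read across chip boundaries and hand each piece to do_read_onechip(). */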
382static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
383{
384 struct map_info *map = mtd->priv;
385 struct cfi_private *cfi = map->fldrv_priv;
386 unsigned long ofs;
387 int chipnum;
388 int ret = 0;
389
390 /* ofs: offset within the first chip that the first read should start */
391 chipnum = (from >> cfi->chipshift);
392 ofs = from - (chipnum << cfi->chipshift);
393
394 while (len) {
395 unsigned long thislen;
396
397 if (chipnum >= cfi->numchips)
398 break;
399
400 if ((len + ofs -1) >> cfi->chipshift)
401 thislen = (1<<cfi->chipshift) - ofs;
402 else
403 thislen = len;
404
405 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
406 if (ret)
407 break;
408
409 *retlen += thislen;
410 len -= thislen;
411 buf += thislen;
412
413 ofs = 0;
414 chipnum++;
415 }
416 return ret;
417}
418
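/*
 * Program one write buffer's worth of data at 'adr': issue the buffer-program
 * command (0xe8) at the aligned buffer address, write the word count and the
 * data, confirm with 0xd0, then poll the status register for completion.
 */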
419static int do_write_buffer(struct map_info *map, struct flchip *chip,
420 unsigned long adr, const u_char *buf, int len)
421{
422 struct cfi_private *cfi = map->fldrv_priv;
423 map_word status, status_OK;
424 unsigned long cmd_adr, timeo;
425 DECLARE_WAITQUEUE(wait, current);
426 int wbufsize, z;
427
428 /* M58LW064A requires bus alignment for buffer writes -- saw */
429 if (adr & (map_bankwidth(map)-1))
430 return -EINVAL;
431
432 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
433 adr += chip->start;
434 cmd_adr = adr & ~(wbufsize-1);
435
436 /* Let's determine this according to the interleave only once */
437 status_OK = CMD(0x80);
438
439 timeo = jiffies + HZ;
440 retry:
441
442#ifdef DEBUG_CFI_FEATURES
443 printk("%s: chip->state[%d]\n", __func__, chip->state);
444#endif
445 mutex_lock(&chip->mutex);
446
447 /* Check that the chip's ready to talk to us.
448 * Later, we can actually think about interrupting it
449 * if it's in FL_ERASING state.
450 * Not just yet, though.
451 */
452 switch (chip->state) {
453 case FL_READY:
454 break;
455
456 case FL_CFI_QUERY:
457 case FL_JEDEC_QUERY:
458 map_write(map, CMD(0x70), cmd_adr);
459 chip->state = FL_STATUS;
460#ifdef DEBUG_CFI_FEATURES
461 printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
462#endif
463
464 case FL_STATUS:
465 status = map_read(map, cmd_adr);
466 if (map_word_andequal(map, status, status_OK, status_OK))
467 break;
468 /* Urgh. Chip not yet ready to talk to us. */
469 if (time_after(jiffies, timeo)) {
470 mutex_unlock(&chip->mutex);
471 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
472 status.x[0], map_read(map, cmd_adr).x[0]);
473 return -EIO;
474 }
475
476 /* Latency issues. Drop the lock, wait a while and retry */
477 mutex_unlock(&chip->mutex);
478 cfi_udelay(1);
479 goto retry;
480
481 default:
482 /* Stick ourselves on a wait queue to be woken when
483 someone changes the status */
484 set_current_state(TASK_UNINTERRUPTIBLE);
485 add_wait_queue(&chip->wq, &wait);
486 mutex_unlock(&chip->mutex);
487 schedule();
488 remove_wait_queue(&chip->wq, &wait);
489 timeo = jiffies + HZ;
490 goto retry;
491 }
492
493 ENABLE_VPP(map);
494 map_write(map, CMD(0xe8), cmd_adr);
495 chip->state = FL_WRITING_TO_BUFFER;
496
497 z = 0;
498 for (;;) {
499 status = map_read(map, cmd_adr);
500 if (map_word_andequal(map, status, status_OK, status_OK))
501 break;
502
503 mutex_unlock(&chip->mutex);
504 cfi_udelay(1);
505 mutex_lock(&chip->mutex);
506
507 if (++z > 100) {
508 /* Argh. Not ready for write to buffer */
509 DISABLE_VPP(map);
510 map_write(map, CMD(0x70), cmd_adr);
511 chip->state = FL_STATUS;
512 mutex_unlock(&chip->mutex);
513 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
514 return -EIO;
515 }
516 }
517
518 /* Write length of data to come */
519 map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
520
521 /* Write data */
522 for (z = 0; z < len;
523 z += map_bankwidth(map), buf += map_bankwidth(map)) {
524 map_word d;
525 d = map_word_load(map, buf);
526 map_write(map, d, adr+z);
527 }
528 /* GO GO GO */
529 map_write(map, CMD(0xd0), cmd_adr);
530 chip->state = FL_WRITING;
531
532 mutex_unlock(&chip->mutex);
533 cfi_udelay(chip->buffer_write_time);
534 mutex_lock(&chip->mutex);
535
536 timeo = jiffies + (HZ/2);
537 z = 0;
538 for (;;) {
539 if (chip->state != FL_WRITING) {
540 /* Someone's suspended the write. Sleep */
541 set_current_state(TASK_UNINTERRUPTIBLE);
542 add_wait_queue(&chip->wq, &wait);
543 mutex_unlock(&chip->mutex);
544 schedule();
545 remove_wait_queue(&chip->wq, &wait);
546 timeo = jiffies + (HZ / 2); /* FIXME */
547 mutex_lock(&chip->mutex);
548 continue;
549 }
550
551 status = map_read(map, cmd_adr);
552 if (map_word_andequal(map, status, status_OK, status_OK))
553 break;
554
555 /* OK Still waiting */
556 if (time_after(jiffies, timeo)) {
557 /* clear status */
558 map_write(map, CMD(0x50), cmd_adr);
559 /* put back into read status register mode */
560 map_write(map, CMD(0x70), adr);
561 chip->state = FL_STATUS;
562 DISABLE_VPP(map);
563 mutex_unlock(&chip->mutex);
564 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
565 return -EIO;
566 }
567
568 /* Latency issues. Drop the lock, wait a while and retry */
569 mutex_unlock(&chip->mutex);
570 cfi_udelay(1);
571 z++;
572 mutex_lock(&chip->mutex);
573 }
574 if (!z) {
575 chip->buffer_write_time--;
576 if (!chip->buffer_write_time)
577 chip->buffer_write_time++;
578 }
579 if (z > 1)
580 chip->buffer_write_time++;
581
582 /* Done and happy. */
583 DISABLE_VPP(map);
584 chip->state = FL_STATUS;
585
586 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
587 if (map_word_bitsset(map, status, CMD(0x3a))) {
588#ifdef DEBUG_CFI_FEATURES
589 printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
590#endif
591 /* clear status */
592 map_write(map, CMD(0x50), cmd_adr);
593 /* put back into read status register mode */
594 map_write(map, CMD(0x70), adr);
595 wake_up(&chip->wq);
596 mutex_unlock(&chip->mutex);
597 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
598 }
599 wake_up(&chip->wq);
600 mutex_unlock(&chip->mutex);
601
602 return 0;
603}
604
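/* Chop a write into buffer-sized, boundary-aligned chunks for do_write_buffer(). */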
605static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
606 size_t len, size_t *retlen, const u_char *buf)
607{
608 struct map_info *map = mtd->priv;
609 struct cfi_private *cfi = map->fldrv_priv;
610 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
611 int ret = 0;
612 int chipnum;
613 unsigned long ofs;
614
615 chipnum = to >> cfi->chipshift;
616 ofs = to - (chipnum << cfi->chipshift);
617
618#ifdef DEBUG_CFI_FEATURES
619 printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
620 printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
621 printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
622#endif
623
624 /* Write buffer is worth it only if more than one word to write... */
625 while (len > 0) {
626 /* We must not cross write block boundaries */
627 int size = wbufsize - (ofs & (wbufsize-1));
628
629 if (size > len)
630 size = len;
631
632 ret = do_write_buffer(map, &cfi->chips[chipnum],
633 ofs, buf, size);
634 if (ret)
635 return ret;
636
637 ofs += size;
638 buf += size;
639 (*retlen) += size;
640 len -= size;
641
642 if (ofs >> cfi->chipshift) {
643 chipnum ++;
644 ofs = 0;
645 if (chipnum == cfi->numchips)
646 return 0;
647 }
648 }
649
650 return 0;
651}
652
653/*
654 * Writev for ECC-Flashes is a little more complicated. We need to maintain
655 * a small buffer for this.
656 * XXX: If the buffer size is not a multiple of 2, this will break
657 */
658#define ECCBUF_SIZE (mtd->writesize)
659#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
660#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
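/*
 * Illustrative example (assuming the 8-byte ECC page set up above): a 21-byte
 * vector element is written as ECCBUF_DIV(21) = 16 aligned bytes, while
 * ECCBUF_MOD(21) = 5 bytes are carried over in 'buffer' to be merged with the
 * next element or flushed padded with 0xff.
 */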
661static int
662cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
663 unsigned long count, loff_t to, size_t *retlen)
664{
665 unsigned long i;
666 size_t totlen = 0, thislen;
667 int ret = 0;
668 size_t buflen = 0;
669 static char *buffer;
670
671 if (!ECCBUF_SIZE) {
672 /* We should fall back to a general writev implementation.
673 * Until that is written, just break.
674 */
675 return -EIO;
676 }
677 buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
678 if (!buffer)
679 return -ENOMEM;
680
681 for (i=0; i<count; i++) {
682 size_t elem_len = vecs[i].iov_len;
683 void *elem_base = vecs[i].iov_base;
684 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
685 continue;
686 if (buflen) { /* cut off head */
687 if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
688 memcpy(buffer+buflen, elem_base, elem_len);
689 buflen += elem_len;
690 continue;
691 }
692 memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
693 ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
694 buffer);
695 totlen += thislen;
696 if (ret || thislen != ECCBUF_SIZE)
697 goto write_error;
698 elem_len -= thislen-buflen;
699 elem_base += thislen-buflen;
700 to += ECCBUF_SIZE;
701 }
702 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
703 ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
704 &thislen, elem_base);
705 totlen += thislen;
706 if (ret || thislen != ECCBUF_DIV(elem_len))
707 goto write_error;
708 to += thislen;
709 }
710 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
711 if (buflen) {
712 memset(buffer, 0xff, ECCBUF_SIZE);
713 memcpy(buffer, elem_base + thislen, buflen);
714 }
715 }
716 if (buflen) { /* flush last page, even if not full */
717 /* This is sometimes intended behaviour, really */
718 ret = mtd_write(mtd, to, buflen, &thislen, buffer);
719 totlen += thislen;
720 if (ret || thislen != ECCBUF_SIZE)
721 goto write_error;
722 }
723write_error:
724 if (retlen)
725 *retlen = totlen;
726 kfree(buffer);
727 return ret;
728}
729
730
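/*
 * Erase a single block: clear the status register, issue the block erase
 * sequence (0x20/0xd0), poll for completion and decode the error bits,
 * retrying a few times if the chip reports an erase failure.
 */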
731static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
732{
733 struct cfi_private *cfi = map->fldrv_priv;
734 map_word status, status_OK;
735 unsigned long timeo;
736 int retries = 3;
737 DECLARE_WAITQUEUE(wait, current);
738 int ret = 0;
739
740 adr += chip->start;
741
742 /* Let's determine this according to the interleave only once */
743 status_OK = CMD(0x80);
744
745 timeo = jiffies + HZ;
746retry:
747 mutex_lock(&chip->mutex);
748
749 /* Check that the chip's ready to talk to us. */
750 switch (chip->state) {
751 case FL_CFI_QUERY:
752 case FL_JEDEC_QUERY:
753 case FL_READY:
754 map_write(map, CMD(0x70), adr);
755 chip->state = FL_STATUS;
756
757 case FL_STATUS:
758 status = map_read(map, adr);
759 if (map_word_andequal(map, status, status_OK, status_OK))
760 break;
761
762 /* Urgh. Chip not yet ready to talk to us. */
763 if (time_after(jiffies, timeo)) {
764 mutex_unlock(&chip->mutex);
765 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
766 return -EIO;
767 }
768
769 /* Latency issues. Drop the lock, wait a while and retry */
770 mutex_unlock(&chip->mutex);
771 cfi_udelay(1);
772 goto retry;
773
774 default:
775 /* Stick ourselves on a wait queue to be woken when
776 someone changes the status */
777 set_current_state(TASK_UNINTERRUPTIBLE);
778 add_wait_queue(&chip->wq, &wait);
779 mutex_unlock(&chip->mutex);
780 schedule();
781 remove_wait_queue(&chip->wq, &wait);
782 timeo = jiffies + HZ;
783 goto retry;
784 }
785
786 ENABLE_VPP(map);
787 /* Clear the status register first */
788 map_write(map, CMD(0x50), adr);
789
790 /* Now erase */
791 map_write(map, CMD(0x20), adr);
792 map_write(map, CMD(0xD0), adr);
793 chip->state = FL_ERASING;
794
795 mutex_unlock(&chip->mutex);
796 msleep(1000);
797 mutex_lock(&chip->mutex);
798
799 /* FIXME. Use a timer to check this, and return immediately. */
800 /* Once the state machine's known to be working I'll do that */
801
802 timeo = jiffies + (HZ*20);
803 for (;;) {
804 if (chip->state != FL_ERASING) {
805 /* Someone's suspended the erase. Sleep */
806 set_current_state(TASK_UNINTERRUPTIBLE);
807 add_wait_queue(&chip->wq, &wait);
808 mutex_unlock(&chip->mutex);
809 schedule();
810 remove_wait_queue(&chip->wq, &wait);
811 timeo = jiffies + (HZ*20); /* FIXME */
812 mutex_lock(&chip->mutex);
813 continue;
814 }
815
816 status = map_read(map, adr);
817 if (map_word_andequal(map, status, status_OK, status_OK))
818 break;
819
820 /* OK Still waiting */
821 if (time_after(jiffies, timeo)) {
822 map_write(map, CMD(0x70), adr);
823 chip->state = FL_STATUS;
824 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
825 DISABLE_VPP(map);
826 mutex_unlock(&chip->mutex);
827 return -EIO;
828 }
829
830 /* Latency issues. Drop the lock, wait a while and retry */
831 mutex_unlock(&chip->mutex);
832 cfi_udelay(1);
833 mutex_lock(&chip->mutex);
834 }
835
836 DISABLE_VPP(map);
837 ret = 0;
838
839 /* We've broken this before. It doesn't hurt to be safe */
840 map_write(map, CMD(0x70), adr);
841 chip->state = FL_STATUS;
842 status = map_read(map, adr);
843
844 /* check for lock bit */
845 if (map_word_bitsset(map, status, CMD(0x3a))) {
846 unsigned char chipstatus = status.x[0];
847 if (!map_word_equal(map, status, CMD(chipstatus))) {
848 int i, w;
849 for (w=0; w<map_words(map); w++) {
850 for (i = 0; i<cfi_interleave(cfi); i++) {
851 chipstatus |= status.x[w] >> (cfi->device_type * 8);
852 }
853 }
854 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
855 status.x[0], chipstatus);
856 }
857 /* Reset the error bits */
858 map_write(map, CMD(0x50), adr);
859 map_write(map, CMD(0x70), adr);
860
861 if ((chipstatus & 0x30) == 0x30) {
862 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
863 ret = -EIO;
864 } else if (chipstatus & 0x02) {
865 /* Protection bit set */
866 ret = -EROFS;
867 } else if (chipstatus & 0x8) {
868 /* Voltage */
869 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
870 ret = -EIO;
871 } else if (chipstatus & 0x20) {
872 if (retries--) {
873 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
874 timeo = jiffies + HZ;
875 chip->state = FL_STATUS;
876 mutex_unlock(&chip->mutex);
877 goto retry;
878 }
879 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
880 ret = -EIO;
881 }
882 }
883
884 wake_up(&chip->wq);
885 mutex_unlock(&chip->mutex);
886 return ret;
887}
888
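/*
 * Erase a range that may span several erase regions and chips. Both ends of
 * the range must be aligned to the erase size in effect at those addresses.
 */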
889static int cfi_staa_erase_varsize(struct mtd_info *mtd,
890 struct erase_info *instr)
891{ struct map_info *map = mtd->priv;
892 struct cfi_private *cfi = map->fldrv_priv;
893 unsigned long adr, len;
894 int chipnum, ret = 0;
895 int i, first;
896 struct mtd_erase_region_info *regions = mtd->eraseregions;
897
1da177e4
LT
898 /* Check that both start and end of the requested erase are
899 * aligned with the erasesize at the appropriate addresses.
900 */
901
902 i = 0;
903
904 /* Skip all erase regions which end before the start of
905 the requested erase. Actually, to save on the calculations,
906 we skip to the first erase region which starts after the
907 start of the requested erase, and then go back one.
908 */
909
910 while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
911 i++;
912 i--;
913
914 /* OK, now i is pointing at the erase region in which this
915 erase request starts. Check the start of the requested
916 erase range is aligned with the erase size which is in
917 effect here.
918 */
919
920 if (instr->addr & (regions[i].erasesize-1))
921 return -EINVAL;
922
923 /* Remember the erase region we start on */
924 first = i;
925
926 /* Next, check that the end of the requested erase is aligned
927 * with the erase region at that address.
928 */
929
930 while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
931 i++;
932
933 /* As before, drop back one to point at the region in which
934 the address actually falls
935 */
936 i--;
937
938 if ((instr->addr + instr->len) & (regions[i].erasesize-1))
939 return -EINVAL;
940
941 chipnum = instr->addr >> cfi->chipshift;
942 adr = instr->addr - (chipnum << cfi->chipshift);
943 len = instr->len;
944
945 i=first;
946
947 while(len) {
948 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
949
950 if (ret)
951 return ret;
952
953 adr += regions[i].erasesize;
954 len -= regions[i].erasesize;
955
956 if (adr % (1 << cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
957 i++;
958
959 if (adr >> cfi->chipshift) {
960 adr = 0;
961 chipnum++;
962
963 if (chipnum >= cfi->numchips)
964 break;
965 }
966 }
967
968 instr->state = MTD_ERASE_DONE;
969 mtd_erase_callback(instr);
970
971 return 0;
972}
973
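/* Wait for every chip to become idle, marking idle chips FL_SYNCING until all are quiesced. */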
974static void cfi_staa_sync (struct mtd_info *mtd)
975{
976 struct map_info *map = mtd->priv;
977 struct cfi_private *cfi = map->fldrv_priv;
978 int i;
979 struct flchip *chip;
980 int ret = 0;
981 DECLARE_WAITQUEUE(wait, current);
982
983 for (i=0; !ret && i<cfi->numchips; i++) {
984 chip = &cfi->chips[i];
985
986 retry:
987 mutex_lock(&chip->mutex);
988
989 switch(chip->state) {
990 case FL_READY:
991 case FL_STATUS:
992 case FL_CFI_QUERY:
993 case FL_JEDEC_QUERY:
994 chip->oldstate = chip->state;
995 chip->state = FL_SYNCING;
996 /* No need to wake_up() on this state change -
997 * as the whole point is that nobody can do anything
998 * with the chip now anyway.
999 */
1000 case FL_SYNCING:
1001 mutex_unlock(&chip->mutex);
1002 break;
1003
1004 default:
1005 /* Not an idle state */
1006 set_current_state(TASK_UNINTERRUPTIBLE);
1007 add_wait_queue(&chip->wq, &wait);
1008
1009 mutex_unlock(&chip->mutex);
1010 schedule();
1011 remove_wait_queue(&chip->wq, &wait);
1012
1013 goto retry;
1014 }
1015 }
1016
1017 /* Unlock the chips again */
1018
1019 for (i--; i >=0; i--) {
1020 chip = &cfi->chips[i];
1021
1022 mutex_lock(&chip->mutex);
1023
1024 if (chip->state == FL_SYNCING) {
1025 chip->state = chip->oldstate;
1026 wake_up(&chip->wq);
1027 }
1028 mutex_unlock(&chip->mutex);
1029 }
1030}
1031
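/*
 * Set the lock bit on one block (0x60/0x01 command pair) and poll the status
 * register until the operation completes.
 */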
1032static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1033{
1034 struct cfi_private *cfi = map->fldrv_priv;
1035 map_word status, status_OK;
1036 unsigned long timeo = jiffies + HZ;
1037 DECLARE_WAITQUEUE(wait, current);
1038
1039 adr += chip->start;
1040
1041 /* Let's determine this according to the interleave only once */
1042 status_OK = CMD(0x80);
1043
1044 timeo = jiffies + HZ;
1045retry:
1046 mutex_lock(&chip->mutex);
1047
1048 /* Check that the chip's ready to talk to us. */
1049 switch (chip->state) {
1050 case FL_CFI_QUERY:
1051 case FL_JEDEC_QUERY:
1052 case FL_READY:
1053 map_write(map, CMD(0x70), adr);
1054 chip->state = FL_STATUS;
1055
1056 case FL_STATUS:
1057 status = map_read(map, adr);
1058 if (map_word_andequal(map, status, status_OK, status_OK))
1059 break;
1060
1061 /* Urgh. Chip not yet ready to talk to us. */
1062 if (time_after(jiffies, timeo)) {
1063 mutex_unlock(&chip->mutex);
1064 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1065 return -EIO;
1066 }
1067
1068 /* Latency issues. Drop the lock, wait a while and retry */
1069 mutex_unlock(&chip->mutex);
1070 cfi_udelay(1);
1071 goto retry;
1072
1073 default:
1074 /* Stick ourselves on a wait queue to be woken when
1075 someone changes the status */
1076 set_current_state(TASK_UNINTERRUPTIBLE);
1077 add_wait_queue(&chip->wq, &wait);
1078 mutex_unlock(&chip->mutex);
1079 schedule();
1080 remove_wait_queue(&chip->wq, &wait);
1081 timeo = jiffies + HZ;
1082 goto retry;
1083 }
1084
1085 ENABLE_VPP(map);
1086 map_write(map, CMD(0x60), adr);
1087 map_write(map, CMD(0x01), adr);
1088 chip->state = FL_LOCKING;
1089
1090 mutex_unlock(&chip->mutex);
1091 msleep(1000);
1092 mutex_lock(&chip->mutex);
1093
1094 /* FIXME. Use a timer to check this, and return immediately. */
1095 /* Once the state machine's known to be working I'll do that */
1096
1097 timeo = jiffies + (HZ*2);
1098 for (;;) {
1099
1100 status = map_read(map, adr);
1101 if (map_word_andequal(map, status, status_OK, status_OK))
1102 break;
1103
1104 /* OK Still waiting */
1105 if (time_after(jiffies, timeo)) {
1106 map_write(map, CMD(0x70), adr);
1107 chip->state = FL_STATUS;
1108 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1109 DISABLE_VPP(map);
1110 mutex_unlock(&chip->mutex);
1111 return -EIO;
1112 }
1113
1114 /* Latency issues. Drop the lock, wait a while and retry */
1115 mutex_unlock(&chip->mutex);
1116 cfi_udelay(1);
1117 mutex_lock(&chip->mutex);
1118 }
1119
1120 /* Done and happy. */
1121 chip->state = FL_STATUS;
1122 DISABLE_VPP(map);
1123 wake_up(&chip->wq);
1124 mutex_unlock(&chip->mutex);
1125 return 0;
1126}
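/* Lock a range of whole erase blocks, one block at a time. */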
1127static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1128{
1129 struct map_info *map = mtd->priv;
1130 struct cfi_private *cfi = map->fldrv_priv;
1131 unsigned long adr;
1132 int chipnum, ret = 0;
1133#ifdef DEBUG_LOCK_BITS
1134 int ofs_factor = cfi->interleave * cfi->device_type;
1135#endif
1136
1137 if (ofs & (mtd->erasesize - 1))
1138 return -EINVAL;
1139
1140 if (len & (mtd->erasesize -1))
1141 return -EINVAL;
1142
1143 chipnum = ofs >> cfi->chipshift;
1144 adr = ofs - (chipnum << cfi->chipshift);
1145
1146 while(len) {
1147
1148#ifdef DEBUG_LOCK_BITS
1149 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1150 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1151 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1152#endif
1153
1154 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1155
1156#ifdef DEBUG_LOCK_BITS
1157 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1158 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1159 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1160#endif
1161
1162 if (ret)
1163 return ret;
1164
1165 adr += mtd->erasesize;
1166 len -= mtd->erasesize;
1167
1168 if (adr >> cfi->chipshift) {
1169 adr = 0;
1170 chipnum++;
1171
1172 if (chipnum >= cfi->numchips)
1173 break;
1174 }
1175 }
1176 return 0;
1177}
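/*
 * Clear the lock bit on one block (0x60/0xd0 command pair) and poll the
 * status register until the operation completes.
 */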
1178static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1179{
1180 struct cfi_private *cfi = map->fldrv_priv;
1181 map_word status, status_OK;
1182 unsigned long timeo = jiffies + HZ;
1183 DECLARE_WAITQUEUE(wait, current);
1184
1185 adr += chip->start;
1186
1187 /* Let's determine this according to the interleave only once */
1188 status_OK = CMD(0x80);
1189
1190 timeo = jiffies + HZ;
1191retry:
1192 mutex_lock(&chip->mutex);
1193
1194 /* Check that the chip's ready to talk to us. */
1195 switch (chip->state) {
1196 case FL_CFI_QUERY:
1197 case FL_JEDEC_QUERY:
1198 case FL_READY:
1199 map_write(map, CMD(0x70), adr);
1200 chip->state = FL_STATUS;
1201
1202 case FL_STATUS:
1203 status = map_read(map, adr);
1204 if (map_word_andequal(map, status, status_OK, status_OK))
1205 break;
1206
1207 /* Urgh. Chip not yet ready to talk to us. */
1208 if (time_after(jiffies, timeo)) {
1209 mutex_unlock(&chip->mutex);
1210 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1211 return -EIO;
1212 }
1213
1214 /* Latency issues. Drop the lock, wait a while and retry */
1215 mutex_unlock(&chip->mutex);
1216 cfi_udelay(1);
1217 goto retry;
1218
1219 default:
1220 /* Stick ourselves on a wait queue to be woken when
1221 someone changes the status */
1222 set_current_state(TASK_UNINTERRUPTIBLE);
1223 add_wait_queue(&chip->wq, &wait);
1224 mutex_unlock(&chip->mutex);
1225 schedule();
1226 remove_wait_queue(&chip->wq, &wait);
1227 timeo = jiffies + HZ;
1228 goto retry;
1229 }
1230
1231 ENABLE_VPP(map);
1232 map_write(map, CMD(0x60), adr);
1233 map_write(map, CMD(0xD0), adr);
1234 chip->state = FL_UNLOCKING;
1235
1236 mutex_unlock(&chip->mutex);
1237 msleep(1000);
1238 mutex_lock(&chip->mutex);
1239
1240 /* FIXME. Use a timer to check this, and return immediately. */
1241 /* Once the state machine's known to be working I'll do that */
1242
1243 timeo = jiffies + (HZ*2);
1244 for (;;) {
1245
1246 status = map_read(map, adr);
1247 if (map_word_andequal(map, status, status_OK, status_OK))
1248 break;
1249
1250 /* OK Still waiting */
1251 if (time_after(jiffies, timeo)) {
1252 map_write(map, CMD(0x70), adr);
1253 chip->state = FL_STATUS;
1254 printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1255 DISABLE_VPP(map);
1256 mutex_unlock(&chip->mutex);
1257 return -EIO;
1258 }
1259
1260 /* Latency issues. Drop the lock, wait a while and retry */
1261 mutex_unlock(&chip->mutex);
1262 cfi_udelay(1);
1263 mutex_lock(&chip->mutex);
1264 }
1265
1266 /* Done and happy. */
1267 chip->state = FL_STATUS;
1268 DISABLE_VPP(map);
1269 wake_up(&chip->wq);
1270 mutex_unlock(&chip->mutex);
1271 return 0;
1272}
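/* Unlock the erase block containing 'ofs'. */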
1273static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1274{
1275 struct map_info *map = mtd->priv;
1276 struct cfi_private *cfi = map->fldrv_priv;
1277 unsigned long adr;
1278 int chipnum, ret = 0;
1279#ifdef DEBUG_LOCK_BITS
1280 int ofs_factor = cfi->interleave * cfi->device_type;
1281#endif
1282
1283 chipnum = ofs >> cfi->chipshift;
1284 adr = ofs - (chipnum << cfi->chipshift);
1285
1286#ifdef DEBUG_LOCK_BITS
1287 {
1288 unsigned long temp_adr = adr;
1289 unsigned long temp_len = len;
1290
1291 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1292 while (temp_len) {
1293 printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1294 temp_adr += mtd->erasesize;
1295 temp_len -= mtd->erasesize;
1296 }
1297 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1298 }
1299#endif
1300
1301 ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1302
1303#ifdef DEBUG_LOCK_BITS
1304 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1305 printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1306 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1307#endif
1308
1309 return ret;
1310}
1311
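/* Mark every idle chip FL_PM_SUSPENDED; back out and return -EAGAIN if any chip is busy. */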
1312static int cfi_staa_suspend(struct mtd_info *mtd)
1313{
1314 struct map_info *map = mtd->priv;
1315 struct cfi_private *cfi = map->fldrv_priv;
1316 int i;
1317 struct flchip *chip;
1318 int ret = 0;
1319
1320 for (i=0; !ret && i<cfi->numchips; i++) {
1321 chip = &cfi->chips[i];
1322
1323 mutex_lock(&chip->mutex);
1324
1325 switch(chip->state) {
1326 case FL_READY:
1327 case FL_STATUS:
1328 case FL_CFI_QUERY:
1329 case FL_JEDEC_QUERY:
1330 chip->oldstate = chip->state;
1331 chip->state = FL_PM_SUSPENDED;
1332 /* No need to wake_up() on this state change -
1333 * as the whole point is that nobody can do anything
1334 * with the chip now anyway.
1335 */
1336 case FL_PM_SUSPENDED:
1337 break;
1338
1339 default:
1340 ret = -EAGAIN;
1341 break;
1342 }
1343 mutex_unlock(&chip->mutex);
1344 }
1345
1346 /* Unlock the chips again */
1347
1348 if (ret) {
1349 for (i--; i >=0; i--) {
1350 chip = &cfi->chips[i];
1351
1352 mutex_lock(&chip->mutex);
1353
1354 if (chip->state == FL_PM_SUSPENDED) {
1355 /* No need to force it into a known state here,
1356 because we're returning failure, and it didn't
1357 get power cycled */
1358 chip->state = chip->oldstate;
1359 wake_up(&chip->wq);
1360 }
1361 mutex_unlock(&chip->mutex);
1362 }
1363 }
1364
1365 return ret;
1366}
1367
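/* Return suspended chips to array (read) mode with 0xFF after a power transition. */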
1368static void cfi_staa_resume(struct mtd_info *mtd)
1369{
1370 struct map_info *map = mtd->priv;
1371 struct cfi_private *cfi = map->fldrv_priv;
1372 int i;
1373 struct flchip *chip;
1374
1375 for (i=0; i<cfi->numchips; i++) {
1376
1377 chip = &cfi->chips[i];
1378
1379 mutex_lock(&chip->mutex);
1380
1381 /* Go to known state. Chip may have been power cycled */
1382 if (chip->state == FL_PM_SUSPENDED) {
1383 map_write(map, CMD(0xFF), 0);
1384 chip->state = FL_READY;
1385 wake_up(&chip->wq);
1386 }
1387
1388 mutex_unlock(&chip->mutex);
1389 }
1390}
1391
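/* Free the command-set and CFI private data allocated at probe time. */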
1392static void cfi_staa_destroy(struct mtd_info *mtd)
1393{
1394 struct map_info *map = mtd->priv;
1395 struct cfi_private *cfi = map->fldrv_priv;
1396 kfree(cfi->cmdset_priv);
1397 kfree(cfi);
1398}
1399
1400MODULE_LICENSE("GPL");