/* hw/omap_gpmc.c */
1 /*
2 * TI OMAP general purpose memory controller emulation.
3 *
4 * Copyright (C) 2007-2009 Nokia Corporation
5 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
6 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 or
11 * (at your option) any later version of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include "hw.h"
22 #include "flash.h"
23 #include "omap.h"
24 #include "memory.h"
25 #include "exec-memory.h"
26
27 /* General-Purpose Memory Controller */
/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;           /* interrupt line towards the MPU */
    qemu_irq drq;           /* DMA request line */
    MemoryRegion iomem;     /* the GPMC register bank itself */
    int accept_256;         /* nonzero: 256MB regions (mask == 0) are legal */

    uint8_t revision;       /* value returned by GPMC_REVISION */
    uint8_t sysconfig;
    uint16_t irqst;         /* GPMC_IRQSTATUS */
    uint16_t irqen;         /* GPMC_IRQENABLE */
    uint16_t lastirq;       /* last value driven on the irq line, to avoid
                             * redundant qemu_set_irq() calls */
    uint16_t timeout;
    uint16_t config;
    /* Per chip-select state: one entry per CS0..CS7 */
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];       /* GPMC_CONFIG1..GPMC_CONFIG7 for this CS */
        MemoryRegion *iomem;      /* attached NOR-like region, if any */
        MemoryRegion container;   /* sized container actually mapped at the
                                   * CS base address */
        MemoryRegion nandiomem;   /* region used when a NAND device is
                                   * attached and prefetch is off */
        DeviceState *dev;         /* attached NAND device, if any */
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    /* Prefetch/postwrite engine state */
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem; /* mapped over the CS when prefetch is enabled */
        uint8_t fifo[64];
    } prefetch;
};
62
/* GPMC_CONFIG1 DEVICESIZE field values */
#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
/* GPMC_CONFIG1 DEVICETYPE field values */
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2
67
68 static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
69 {
70 return (f->config[0] >> 10) & 3;
71 }
72
73 static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
74 {
75 /* devsize field is really 2 bits but we ignore the high
76 * bit to ensure consistent behaviour if the guest sets
77 * it (values 2 and 3 are reserved in the TRM)
78 */
79 return (f->config[0] >> 12) & 1;
80 }
81
/* Extract the chip-select value from the prefetch config1 register
 * (ENGINECSSELECTOR, bits 26:24).
 */
static int prefetch_cs(uint32_t config1)
{
    uint32_t selector = config1 >> 24;
    return (int)(selector & 7);
}
87
/* Extract the FIFOTHRESHOLD field (bits 14:8) from the prefetch
 * config1 register.
 */
static int prefetch_threshold(uint32_t config1)
{
    uint32_t threshold = (config1 >> 8) & 0x7f;
    return (int)threshold;
}
92
93 static void omap_gpmc_int_update(struct omap_gpmc_s *s)
94 {
95 /* The TRM is a bit unclear, but it seems to say that
96 * the TERMINALCOUNTSTATUS bit is set only on the
97 * transition when the prefetch engine goes from
98 * active to inactive, whereas the FIFOEVENTSTATUS
99 * bit is held high as long as the fifo has at
100 * least THRESHOLD bytes available.
101 * So we do the latter here, but TERMINALCOUNTSTATUS
102 * is set elsewhere.
103 */
104 if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
105 s->irqst |= 1;
106 }
107 if ((s->irqen & s->irqst) != s->lastirq) {
108 s->lastirq = s->irqen & s->irqst;
109 qemu_set_irq(s->irq, s->lastirq);
110 }
111 }
112
113 static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
114 {
115 if (s->prefetch.config1 & 4) {
116 qemu_set_irq(s->drq, value);
117 }
118 }
119
120 /* Access functions for when a NAND-like device is mapped into memory:
121 * all addresses in the region behave like accesses to the relevant
122 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
123 */
124 static uint64_t omap_nand_read(void *opaque, target_phys_addr_t addr,
125 unsigned size)
126 {
127 struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
128 uint64_t v;
129 nand_setpins(f->dev, 0, 0, 0, 1, 0);
130 switch (omap_gpmc_devsize(f)) {
131 case OMAP_GPMC_8BIT:
132 v = nand_getio(f->dev);
133 if (size == 1) {
134 return v;
135 }
136 v |= (nand_getio(f->dev) << 8);
137 if (size == 2) {
138 return v;
139 }
140 v |= (nand_getio(f->dev) << 16);
141 v |= (nand_getio(f->dev) << 24);
142 return v;
143 case OMAP_GPMC_16BIT:
144 v = nand_getio(f->dev);
145 if (size == 1) {
146 /* 8 bit read from 16 bit device : probably a guest bug */
147 return v & 0xff;
148 }
149 if (size == 2) {
150 return v;
151 }
152 v |= (nand_getio(f->dev) << 16);
153 return v;
154 default:
155 abort();
156 }
157 }
158
159 static void omap_nand_setio(DeviceState *dev, uint64_t value,
160 int nandsize, int size)
161 {
162 /* Write the specified value to the NAND device, respecting
163 * both size of the NAND device and size of the write access.
164 */
165 switch (nandsize) {
166 case OMAP_GPMC_8BIT:
167 switch (size) {
168 case 1:
169 nand_setio(dev, value & 0xff);
170 break;
171 case 2:
172 nand_setio(dev, value & 0xff);
173 nand_setio(dev, (value >> 8) & 0xff);
174 break;
175 case 4:
176 default:
177 nand_setio(dev, value & 0xff);
178 nand_setio(dev, (value >> 8) & 0xff);
179 nand_setio(dev, (value >> 16) & 0xff);
180 nand_setio(dev, (value >> 24) & 0xff);
181 break;
182 }
183 case OMAP_GPMC_16BIT:
184 switch (size) {
185 case 1:
186 /* writing to a 16bit device with 8bit access is probably a guest
187 * bug; pass the value through anyway.
188 */
189 case 2:
190 nand_setio(dev, value & 0xffff);
191 break;
192 case 4:
193 default:
194 nand_setio(dev, value & 0xffff);
195 nand_setio(dev, (value >> 16) & 0xffff);
196 break;
197 }
198 }
199 }
200
201 static void omap_nand_write(void *opaque, target_phys_addr_t addr,
202 uint64_t value, unsigned size)
203 {
204 struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
205 nand_setpins(f->dev, 0, 0, 0, 1, 0);
206 omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
207 }
208
/* Region ops used for a chip-select with a NAND device attached while
 * the prefetch engine is not mapped over it.
 */
static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
214
static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    /* Nonzero DEVICESIZE in CONFIG1 means a 16 bit wide device */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        /* Never read past the end of the programmed transfer */
        bytes = s->prefetch.count;
    }
    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    /* Read the new data from the device into the top of the FIFO */
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}
277
278 /* Access functions for a NAND-like device when the prefetch/postwrite
279 * engine is enabled -- all addresses in the region behave alike:
280 * data is read or written to the FIFO.
281 */
/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read from the FIFO, one byte at a time.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, target_phys_addr_t addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    /* Data is consumed from the top of the FIFO downwards */
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}
311
/* Write path of the prefetch/postwrite engine: bytes written anywhere
 * in the region are forwarded to the NAND device selected by config1,
 * with pairing into 16 bit words when the device is 16 bits wide.
 */
static void omap_gpmc_prefetch_write(void *opaque, target_phys_addr_t addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete. We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            /* Second byte arrived: combine and send the full word */
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}
361
/* Region ops mapped over a chip-select while the prefetch engine is
 * enabled for it. impl access size 1 forces the memory core to split
 * wider accesses into byte-at-a-time FIFO operations.
 */
static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};
369
370 static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
371 {
372 /* Return the MemoryRegion* to map/unmap for this chipselect */
373 struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
374 if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
375 return f->iomem;
376 }
377 if ((s->prefetch.config1 & 0x80) &&
378 (prefetch_cs(s->prefetch.config1) == cs)) {
379 /* The prefetch engine is enabled for this CS: map the FIFO */
380 return &s->prefetch.iomem;
381 }
382 return &f->nandiomem;
383 }
384
/* Map the region for chipselect 'cs' into the system address space,
 * at the base/size programmed in GPMC_CONFIG7. A no-op when nothing
 * is attached or CSVALID is clear.
 */
static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;  /* CONFIG7 MASKADDRESS */
    uint32_t base = f->config[6] & 0x3f;        /* CONFIG7 BASEADDRESS */
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    /* Only contiguous mask values are representable as a region size;
     * mask 0 (256MB) is only valid on chips that accept it.
     */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>. */
    memory_region_init(&f->container, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}
420
421 static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
422 {
423 struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
424 if (!(f->config[6] & (1 << 6))) {
425 /* Do nothing unless CSVALID */
426 return;
427 }
428 if (!f->iomem && !f->dev) {
429 return;
430 }
431 memory_region_del_subregion(get_system_memory(), &f->container);
432 memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
433 memory_region_destroy(&f->container);
434 }
435
436 void omap_gpmc_reset(struct omap_gpmc_s *s)
437 {
438 int i;
439
440 s->sysconfig = 0;
441 s->irqst = 0;
442 s->irqen = 0;
443 omap_gpmc_int_update(s);
444 s->timeout = 0;
445 s->config = 0xa00;
446 s->prefetch.config1 = 0x00004000;
447 s->prefetch.transfercount = 0x00000000;
448 s->prefetch.startengine = 0;
449 s->prefetch.fifopointer = 0;
450 s->prefetch.count = 0;
451 for (i = 0; i < 8; i ++) {
452 omap_gpmc_cs_unmap(s, i);
453 s->cs_file[i].config[1] = 0x101001;
454 s->cs_file[i].config[2] = 0x020201;
455 s->cs_file[i].config[3] = 0x10031003;
456 s->cs_file[i].config[4] = 0x10f1111;
457 s->cs_file[i].config[5] = 0;
458 s->cs_file[i].config[6] = 0xf00 | (i ? 0 : 1 << 6);
459
460 s->cs_file[i].config[6] = 0xf00;
461 /* In theory we could probe attached devices for some CFG1
462 * bits here, but we just retain them across resets as they
463 * were set initially by omap_gpmc_attach().
464 */
465 if (i == 0) {
466 s->cs_file[i].config[0] &= 0x00433e00;
467 s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
468 omap_gpmc_cs_map(s, i);
469 } else {
470 s->cs_file[i].config[0] &= 0x00403c00;
471 }
472 }
473 s->ecc_cs = 0;
474 s->ecc_ptr = 0;
475 s->ecc_cfg = 0x3fcff000;
476 for (i = 0; i < 9; i ++)
477 ecc_reset(&s->ecc[i]);
478 }
479
480 static int gpmc_wordaccess_only(target_phys_addr_t addr)
481 {
482 /* Return true if the register offset is to a register that
483 * only permits word width accesses.
484 * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
485 * for any chipselect.
486 */
487 if (addr >= 0x60 && addr <= 0x1d4) {
488 int cs = (addr - 0x60) / 0x30;
489 addr -= cs * 0x30;
490 if (addr >= 0x7c && addr < 0x88) {
491 /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
492 return 0;
493 }
494 }
495 return 1;
496 }
497
/* Read handler for the GPMC register bank. Non-word accesses are
 * rejected except for the per-CS NAND data registers.
 */
static uint64_t omap_gpmc_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    /* Per chip-select register window: 0x30 bytes per CS */
    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60: /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64: /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68: /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c: /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70: /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74: /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78: /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* FIFOPOINTER | FIFOTHRESHOLDSTATUS | COUNTVALUE */
        return (s->prefetch.fifopointer << 24) |
               ((s->prefetch.fifopointer >=
                 ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
               s->prefetch.count;

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
            ((s->ecc[cs].cp & 0x07) << 0) |
            ((s->ecc[cs].cp & 0x38) << 13) |
            ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
            ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}
602
603 static void omap_gpmc_write(void *opaque, target_phys_addr_t addr,
604 uint64_t value, unsigned size)
605 {
606 struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
607 int cs;
608 struct omap_gpmc_cs_file_s *f;
609
610 if (size != 4 && gpmc_wordaccess_only(addr)) {
611 return omap_badwidth_write32(opaque, addr, value);
612 }
613
614 switch (addr) {
615 case 0x000: /* GPMC_REVISION */
616 case 0x014: /* GPMC_SYSSTATUS */
617 case 0x054: /* GPMC_STATUS */
618 case 0x1f0: /* GPMC_PREFETCH_STATUS */
619 case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
620 case 0x234: /* GPMC_PSA_LSB */
621 case 0x238: /* GPMC_PSA_MSB */
622 OMAP_RO_REG(addr);
623 break;
624
625 case 0x010: /* GPMC_SYSCONFIG */
626 if ((value >> 3) == 0x3)
627 fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
628 __FUNCTION__, value >> 3);
629 if (value & 2)
630 omap_gpmc_reset(s);
631 s->sysconfig = value & 0x19;
632 break;
633
634 case 0x018: /* GPMC_IRQSTATUS */
635 s->irqen &= ~value;
636 omap_gpmc_int_update(s);
637 break;
638
639 case 0x01c: /* GPMC_IRQENABLE */
640 s->irqen = value & 0xf03;
641 omap_gpmc_int_update(s);
642 break;
643
644 case 0x040: /* GPMC_TIMEOUT_CONTROL */
645 s->timeout = value & 0x1ff1;
646 break;
647
648 case 0x044: /* GPMC_ERR_ADDRESS */
649 case 0x048: /* GPMC_ERR_TYPE */
650 break;
651
652 case 0x050: /* GPMC_CONFIG */
653 s->config = value & 0xf13;
654 break;
655
656 case 0x060 ... 0x1d4:
657 cs = (addr - 0x060) / 0x30;
658 addr -= cs * 0x30;
659 f = s->cs_file + cs;
660 switch (addr) {
661 case 0x60: /* GPMC_CONFIG1 */
662 f->config[0] = value & 0xffef3e13;
663 break;
664 case 0x64: /* GPMC_CONFIG2 */
665 f->config[1] = value & 0x001f1f8f;
666 break;
667 case 0x68: /* GPMC_CONFIG3 */
668 f->config[2] = value & 0x001f1f8f;
669 break;
670 case 0x6c: /* GPMC_CONFIG4 */
671 f->config[3] = value & 0x1f8f1f8f;
672 break;
673 case 0x70: /* GPMC_CONFIG5 */
674 f->config[4] = value & 0x0f1f1f1f;
675 break;
676 case 0x74: /* GPMC_CONFIG6 */
677 f->config[5] = value & 0x00000fcf;
678 break;
679 case 0x78: /* GPMC_CONFIG7 */
680 if ((f->config[6] ^ value) & 0xf7f) {
681 omap_gpmc_cs_unmap(s, cs);
682 f->config[6] = value & 0x00000f7f;
683 omap_gpmc_cs_map(s, cs);
684 }
685 break;
686 case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
687 if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
688 nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
689 omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
690 }
691 break;
692 case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
693 if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
694 nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
695 omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
696 }
697 break;
698 case 0x84 ... 0x87: /* GPMC_NAND_DATA */
699 if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
700 omap_nand_write(f, 0, value, size);
701 }
702 break;
703 default:
704 goto bad_reg;
705 }
706 break;
707
708 case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
709 if (!s->prefetch.startengine) {
710 uint32_t oldconfig1 = s->prefetch.config1;
711 uint32_t changed;
712 s->prefetch.config1 = value & 0x7f8f7fbf;
713 changed = oldconfig1 ^ s->prefetch.config1;
714 if (changed & (0x80 | 0x7000000)) {
715 /* Turning the engine on or off, or mapping it somewhere else.
716 * cs_map() and cs_unmap() check the prefetch config and
717 * overall CSVALID bits, so it is sufficient to unmap-and-map
718 * both the old cs and the new one.
719 */
720 int oldcs = prefetch_cs(oldconfig1);
721 int newcs = prefetch_cs(s->prefetch.config1);
722 omap_gpmc_cs_unmap(s, oldcs);
723 omap_gpmc_cs_map(s, oldcs);
724 if (newcs != oldcs) {
725 omap_gpmc_cs_unmap(s, newcs);
726 omap_gpmc_cs_map(s, newcs);
727 }
728 }
729 }
730 break;
731
732 case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
733 if (!s->prefetch.startengine) {
734 s->prefetch.transfercount = value & 0x3fff;
735 }
736 break;
737
738 case 0x1ec: /* GPMC_PREFETCH_CONTROL */
739 if (s->prefetch.startengine != (value & 1)) {
740 s->prefetch.startengine = value & 1;
741 if (s->prefetch.startengine) {
742 /* Prefetch engine start */
743 s->prefetch.count = s->prefetch.transfercount;
744 if (s->prefetch.config1 & 1) {
745 /* Write */
746 s->prefetch.fifopointer = 64;
747 } else {
748 /* Read */
749 s->prefetch.fifopointer = 0;
750 fill_prefetch_fifo(s);
751 }
752 } else {
753 /* Prefetch engine forcibly stopped. The TRM
754 * doesn't define the behaviour if you do this.
755 * We clear the prefetch count, which means that
756 * we permit no more writes, and don't read any
757 * more data from NAND. The CPU can still drain
758 * the FIFO of unread data.
759 */
760 s->prefetch.count = 0;
761 }
762 omap_gpmc_int_update(s);
763 }
764 break;
765
766 case 0x1f4: /* GPMC_ECC_CONFIG */
767 s->ecc_cs = 0x8f;
768 break;
769 case 0x1f8: /* GPMC_ECC_CONTROL */
770 if (value & (1 << 8))
771 for (cs = 0; cs < 9; cs ++)
772 ecc_reset(&s->ecc[cs]);
773 s->ecc_ptr = value & 0xf;
774 if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
775 s->ecc_ptr = 0;
776 s->ecc_cs &= ~1;
777 }
778 break;
779 case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
780 s->ecc_cfg = value & 0x3fcff1ff;
781 break;
782 case 0x230: /* GPMC_TESTMODE_CTRL */
783 if (value & 7)
784 fprintf(stderr, "%s: test mode enable attempt\n", __FUNCTION__);
785 break;
786
787 default:
788 bad_reg:
789 OMAP_BAD_REG(addr);
790 return;
791 }
792 }
793
/* Region ops for the GPMC register bank itself */
static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
799
/* Allocate and initialise a GPMC, mapping its register bank at 'base'
 * and wiring its interrupt and DMA request lines. Returns the new
 * device state; ownership stays with the caller (never freed in
 * practice - lives for the lifetime of the machine).
 */
struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   target_phys_addr_t base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = (struct omap_gpmc_s *)
            g_malloc0(sizeof(struct omap_gpmc_s));

    memory_region_init_io(&s->iomem, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    /* Only the 3630 allows the full 256MB (mask == 0) chip-select size */
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}
836
837 void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
838 {
839 struct omap_gpmc_cs_file_s *f;
840 assert(iomem);
841
842 if (cs < 0 || cs >= 8) {
843 fprintf(stderr, "%s: bad chip-select %i\n", __FUNCTION__, cs);
844 exit(-1);
845 }
846 f = &s->cs_file[cs];
847
848 omap_gpmc_cs_unmap(s, cs);
849 f->config[0] &= ~(0xf << 10);
850 f->iomem = iomem;
851 omap_gpmc_cs_map(s, cs);
852 }
853
854 void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
855 {
856 struct omap_gpmc_cs_file_s *f;
857 assert(nand);
858
859 if (cs < 0 || cs >= 8) {
860 fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
861 exit(-1);
862 }
863 f = &s->cs_file[cs];
864
865 omap_gpmc_cs_unmap(s, cs);
866 f->config[0] &= ~(0xf << 10);
867 f->config[0] |= (OMAP_GPMC_NAND << 10);
868 f->dev = nand;
869 if (nand_getbuswidth(f->dev) == 16) {
870 f->config[0] |= OMAP_GPMC_16BIT << 12;
871 }
872 omap_gpmc_cs_map(s, cs);
873 }