hw/intel-hda.c
1 /*
2 * Copyright (C) 2010 Red Hat, Inc.
3 *
4 * written by Gerd Hoffmann <kraxel@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 or
9 * (at your option) version 3 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "hw.h"
21 #include "pci.h"
22 #include "msi.h"
23 #include "qemu-timer.h"
24 #include "audiodev.h"
25 #include "intel-hda.h"
26 #include "intel-hda-defs.h"
27
28 /* --------------------------------------------------------------------- */
29 /* hda bus */
30
31 static struct BusInfo hda_codec_bus_info = {
32 .name = "HDA",
33 .size = sizeof(HDACodecBus),
34 .props = (Property[]) {
35 DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1),
36 DEFINE_PROP_END_OF_LIST()
37 }
38 };
39
40 void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus,
41 hda_codec_response_func response,
42 hda_codec_xfer_func xfer)
43 {
44 qbus_create_inplace(&bus->qbus, &hda_codec_bus_info, dev, NULL);
45 bus->response = response;
46 bus->xfer = xfer;
47 }
48
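/*
 * qdev init hook for codec devices: assign the next free codec address
 * (cad) when the property is left at -1, reject addresses >= 15, then
 * call the codec's own init function.
 */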
49 static int hda_codec_dev_init(DeviceState *qdev, DeviceInfo *base)
50 {
51 HDACodecBus *bus = DO_UPCAST(HDACodecBus, qbus, qdev->parent_bus);
52 HDACodecDevice *dev = DO_UPCAST(HDACodecDevice, qdev, qdev);
53 HDACodecDeviceInfo *info = DO_UPCAST(HDACodecDeviceInfo, qdev, base);
54
55 dev->info = info;
56 if (dev->cad == -1) {
57 dev->cad = bus->next_cad;
58 }
59 if (dev->cad >= 15) {
60 return -1;
61 }
62 bus->next_cad = dev->cad + 1;
63 return info->init(dev);
64 }
65
66 static int hda_codec_dev_exit(DeviceState *qdev)
67 {
68 HDACodecDevice *dev = DO_UPCAST(HDACodecDevice, qdev, qdev);
69
70 if (dev->info->exit) {
71 dev->info->exit(dev);
72 }
73 return 0;
74 }
75
76 void hda_codec_register(HDACodecDeviceInfo *info)
77 {
78 info->qdev.init = hda_codec_dev_init;
79 info->qdev.exit = hda_codec_dev_exit;
80 info->qdev.bus_info = &hda_codec_bus_info;
81 qdev_register(&info->qdev);
82 }
83
84 HDACodecDevice *hda_codec_find(HDACodecBus *bus, uint32_t cad)
85 {
86 DeviceState *qdev;
87 HDACodecDevice *cdev;
88
89 QLIST_FOREACH(qdev, &bus->qbus.children, sibling) {
90 cdev = DO_UPCAST(HDACodecDevice, qdev, qdev);
91 if (cdev->cad == cad) {
92 return cdev;
93 }
94 }
95 return NULL;
96 }
97
98 void hda_codec_response(HDACodecDevice *dev, bool solicited, uint32_t response)
99 {
100 HDACodecBus *bus = DO_UPCAST(HDACodecBus, qbus, dev->qdev.parent_bus);
101 bus->response(dev, solicited, response);
102 }
103
104 bool hda_codec_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
105 uint8_t *buf, uint32_t len)
106 {
107 HDACodecBus *bus = DO_UPCAST(HDACodecBus, qbus, dev->qdev.parent_bus);
108 return bus->xfer(dev, stnr, output, buf, len);
109 }
110
111 /* --------------------------------------------------------------------- */
112 /* intel hda emulation */
113
114 typedef struct IntelHDAStream IntelHDAStream;
115 typedef struct IntelHDAState IntelHDAState;
116 typedef struct IntelHDAReg IntelHDAReg;
117
118 typedef struct bpl {
119 uint64_t addr;
120 uint32_t len;
121 uint32_t flags;
122 } bpl;
123
124 struct IntelHDAStream {
125 /* registers */
126 uint32_t ctl;
127 uint32_t lpib;
128 uint32_t cbl;
129 uint32_t lvi;
130 uint32_t fmt;
131 uint32_t bdlp_lbase;
132 uint32_t bdlp_ubase;
133
134 /* state */
135 bpl *bpl;
136 uint32_t bentries;
137 uint32_t bsize, be, bp;
138 };
139
140 struct IntelHDAState {
141 PCIDevice pci;
142 const char *name;
143 HDACodecBus codecs;
144
145 /* registers */
146 uint32_t g_ctl;
147 uint32_t wake_en;
148 uint32_t state_sts;
149 uint32_t int_ctl;
150 uint32_t int_sts;
151 uint32_t wall_clk;
152
153 uint32_t corb_lbase;
154 uint32_t corb_ubase;
155 uint32_t corb_rp;
156 uint32_t corb_wp;
157 uint32_t corb_ctl;
158 uint32_t corb_sts;
159 uint32_t corb_size;
160
161 uint32_t rirb_lbase;
162 uint32_t rirb_ubase;
163 uint32_t rirb_wp;
164 uint32_t rirb_cnt;
165 uint32_t rirb_ctl;
166 uint32_t rirb_sts;
167 uint32_t rirb_size;
168
169 uint32_t dp_lbase;
170 uint32_t dp_ubase;
171
172 uint32_t icw;
173 uint32_t irr;
174 uint32_t ics;
175
176 /* streams */
177 IntelHDAStream st[8];
178
179 /* state */
180 MemoryRegion mmio;
181 uint32_t rirb_count;
182 int64_t wall_base_ns;
183
184 /* debug logging */
185 const IntelHDAReg *last_reg;
186 uint32_t last_val;
187 uint32_t last_write;
188 uint32_t last_sec;
189 uint32_t repeat_count;
190
191 /* properties */
192 uint32_t debug;
193 uint32_t msi;
194 };
195
196 struct IntelHDAReg {
197 const char *name; /* register name */
198 uint32_t size; /* size in bytes */
199 uint32_t reset; /* reset value */
200 uint32_t wmask; /* write mask */
201 uint32_t wclear; /* write 1 to clear bits */
202 uint32_t offset; /* location in IntelHDAState */
203     uint32_t shift;              /* bit shift for byte-sized views of a dword register */
204     uint32_t stream;             /* stream index, for per-stream registers */
205 void (*whandler)(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old);
206 void (*rhandler)(IntelHDAState *d, const IntelHDAReg *reg);
207 };
208
209 static void intel_hda_reset(DeviceState *dev);
210
211 /* --------------------------------------------------------------------- */
212
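/* combine the low/high base address register pair into a guest physical address */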
213 static target_phys_addr_t intel_hda_addr(uint32_t lbase, uint32_t ubase)
214 {
215 target_phys_addr_t addr;
216
217 #if TARGET_PHYS_ADDR_BITS == 32
218 addr = lbase;
219 #else
220 addr = ubase;
221 addr <<= 32;
222 addr |= lbase;
223 #endif
224 return addr;
225 }
226
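/*
 * Recompute the interrupt status register (INTSTS): bit 30 collects the
 * controller sources (RIRB interrupt/overrun, codec wake events enabled
 * via WAKEEN), bits 0-7 mirror the per-stream buffer completion bits,
 * and bit 31 is the global interrupt status gated by INTCTL.
 */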
227 static void intel_hda_update_int_sts(IntelHDAState *d)
228 {
229 uint32_t sts = 0;
230 uint32_t i;
231
232 /* update controller status */
233 if (d->rirb_sts & ICH6_RBSTS_IRQ) {
234 sts |= (1 << 30);
235 }
236 if (d->rirb_sts & ICH6_RBSTS_OVERRUN) {
237 sts |= (1 << 30);
238 }
239 if (d->state_sts & d->wake_en) {
240 sts |= (1 << 30);
241 }
242
243 /* update stream status */
244 for (i = 0; i < 8; i++) {
245 /* buffer completion interrupt */
246 if (d->st[i].ctl & (1 << 26)) {
247 sts |= (1 << i);
248 }
249 }
250
251 /* update global status */
252 if (sts & d->int_ctl) {
253 sts |= (1 << 31);
254 }
255
256 d->int_sts = sts;
257 }
258
259 static void intel_hda_update_irq(IntelHDAState *d)
260 {
261 int msi = d->msi && msi_enabled(&d->pci);
262 int level;
263
264 intel_hda_update_int_sts(d);
265 if (d->int_sts & (1 << 31) && d->int_ctl & (1 << 31)) {
266 level = 1;
267 } else {
268 level = 0;
269 }
270 dprint(d, 2, "%s: level %d [%s]\n", __FUNCTION__,
271 level, msi ? "msi" : "intx");
272 if (msi) {
273 if (level) {
274 msi_notify(&d->pci, 0);
275 }
276 } else {
277 qemu_set_irq(d->pci.irq[0], level);
278 }
279 }
280
281 static int intel_hda_send_command(IntelHDAState *d, uint32_t verb)
282 {
283 uint32_t cad, nid, data;
284 HDACodecDevice *codec;
285
286 cad = (verb >> 28) & 0x0f;
287 if (verb & (1 << 27)) {
288 /* indirect node addressing, not specified in HDA 1.0 */
289 dprint(d, 1, "%s: indirect node addressing (guest bug?)\n", __FUNCTION__);
290 return -1;
291 }
292 nid = (verb >> 20) & 0x7f;
293 data = verb & 0xfffff;
294
295 codec = hda_codec_find(&d->codecs, cad);
296 if (codec == NULL) {
297 dprint(d, 1, "%s: addressed non-existing codec\n", __FUNCTION__);
298 return -1;
299 }
300 codec->info->command(codec, nid, data);
301 return 0;
302 }
303
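/*
 * Process pending commands.  An immediate command (ICS busy) is sent
 * directly from the ICW register; otherwise verbs are fetched from the
 * guest's CORB ring until the ring is empty, the CORB DMA engine is
 * stopped, or the RINTCNT response limit has been reached.
 */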
304 static void intel_hda_corb_run(IntelHDAState *d)
305 {
306 target_phys_addr_t addr;
307 uint32_t rp, verb;
308
309 if (d->ics & ICH6_IRS_BUSY) {
310 dprint(d, 2, "%s: [icw] verb 0x%08x\n", __FUNCTION__, d->icw);
311 intel_hda_send_command(d, d->icw);
312 return;
313 }
314
315 for (;;) {
316 if (!(d->corb_ctl & ICH6_CORBCTL_RUN)) {
317 dprint(d, 2, "%s: !run\n", __FUNCTION__);
318 return;
319 }
320 if ((d->corb_rp & 0xff) == d->corb_wp) {
321 dprint(d, 2, "%s: corb ring empty\n", __FUNCTION__);
322 return;
323 }
324 if (d->rirb_count == d->rirb_cnt) {
325 dprint(d, 2, "%s: rirb count reached\n", __FUNCTION__);
326 return;
327 }
328
329 rp = (d->corb_rp + 1) & 0xff;
330 addr = intel_hda_addr(d->corb_lbase, d->corb_ubase);
331 verb = ldl_le_phys(addr + 4*rp);
332 d->corb_rp = rp;
333
334 dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __FUNCTION__, rp, verb);
335 intel_hda_send_command(d, verb);
336 }
337 }
338
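/*
 * Codec response callback.  Responses to immediate commands go into the
 * IRR/ICS registers; ring responses are stored into the guest's RIRB
 * (two dwords per entry: response and ex, where ex carries the codec
 * address plus the unsolicited flag) and may raise a response interrupt.
 */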
339 static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response)
340 {
341 HDACodecBus *bus = DO_UPCAST(HDACodecBus, qbus, dev->qdev.parent_bus);
342 IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
343 target_phys_addr_t addr;
344 uint32_t wp, ex;
345
346 if (d->ics & ICH6_IRS_BUSY) {
347 dprint(d, 2, "%s: [irr] response 0x%x, cad 0x%x\n",
348 __FUNCTION__, response, dev->cad);
349 d->irr = response;
350 d->ics &= ~(ICH6_IRS_BUSY | 0xf0);
351 d->ics |= (ICH6_IRS_VALID | (dev->cad << 4));
352 return;
353 }
354
355 if (!(d->rirb_ctl & ICH6_RBCTL_DMA_EN)) {
356 dprint(d, 1, "%s: rirb dma disabled, drop codec response\n", __FUNCTION__);
357 return;
358 }
359
360 ex = (solicited ? 0 : (1 << 4)) | dev->cad;
361 wp = (d->rirb_wp + 1) & 0xff;
362 addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase);
363 stl_le_phys(addr + 8*wp, response);
364 stl_le_phys(addr + 8*wp + 4, ex);
365 d->rirb_wp = wp;
366
367 dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n",
368 __FUNCTION__, wp, response, ex);
369
370 d->rirb_count++;
371 if (d->rirb_count == d->rirb_cnt) {
372 dprint(d, 2, "%s: rirb count reached (%d)\n", __FUNCTION__, d->rirb_count);
373 if (d->rirb_ctl & ICH6_RBCTL_IRQ_EN) {
374 d->rirb_sts |= ICH6_RBSTS_IRQ;
375 intel_hda_update_irq(d);
376 }
377 } else if ((d->corb_rp & 0xff) == d->corb_wp) {
378 dprint(d, 2, "%s: corb ring empty (%d/%d)\n", __FUNCTION__,
379 d->rirb_count, d->rirb_cnt);
380 if (d->rirb_ctl & ICH6_RBCTL_IRQ_EN) {
381 d->rirb_sts |= ICH6_RBSTS_IRQ;
382 intel_hda_update_irq(d);
383 }
384 }
385 }
386
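/*
 * Stream DMA callback used by the codecs.  Finds the stream whose tag
 * matches stnr, then copies up to len bytes between buf and the buffer
 * descriptor list entries, advancing LPIB and the DMA position buffer,
 * and flags a buffer completion interrupt for entries with IOC set.
 */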
387 static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
388 uint8_t *buf, uint32_t len)
389 {
390 HDACodecBus *bus = DO_UPCAST(HDACodecBus, qbus, dev->qdev.parent_bus);
391 IntelHDAState *d = container_of(bus, IntelHDAState, codecs);
392 IntelHDAStream *st = NULL;
393 target_phys_addr_t addr;
394 uint32_t s, copy, left;
395 bool irq = false;
396
397 for (s = 0; s < ARRAY_SIZE(d->st); s++) {
398 if (stnr == ((d->st[s].ctl >> 20) & 0x0f)) {
399 st = d->st + s;
400 break;
401 }
402 }
403 if (st == NULL) {
404 return false;
405 }
406 if (st->bpl == NULL) {
407 return false;
408 }
409 if (st->ctl & (1 << 26)) {
410 /*
411          * Delay the next DMA transfer until the guest has
412          * acknowledged the buffer completion interrupt.
413 */
414 return false;
415 }
416
417 left = len;
418 while (left > 0) {
419 copy = left;
420 if (copy > st->bsize - st->lpib)
421 copy = st->bsize - st->lpib;
422 if (copy > st->bpl[st->be].len - st->bp)
423 copy = st->bpl[st->be].len - st->bp;
424
425 dprint(d, 3, "dma: entry %d, pos %d/%d, copy %d\n",
426 st->be, st->bp, st->bpl[st->be].len, copy);
427
428 cpu_physical_memory_rw(st->bpl[st->be].addr + st->bp,
429 buf, copy, !output);
430 st->lpib += copy;
431 st->bp += copy;
432 buf += copy;
433 left -= copy;
434
435 if (st->bpl[st->be].len == st->bp) {
436 /* bpl entry filled */
437 if (st->bpl[st->be].flags & 0x01) {
438 irq = true;
439 }
440 st->bp = 0;
441 st->be++;
442 if (st->be == st->bentries) {
443 /* bpl wrap around */
444 st->be = 0;
445 st->lpib = 0;
446 }
447 }
448 }
449 if (d->dp_lbase & 0x01) {
450 addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase);
451 stl_le_phys(addr + 8*s, st->lpib);
452 }
453 dprint(d, 3, "dma: --\n");
454
455 if (irq) {
456 st->ctl |= (1 << 26); /* buffer completion interrupt */
457 intel_hda_update_irq(d);
458 }
459 return true;
460 }
461
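/*
 * Read the buffer descriptor list from guest memory.  Each 16-byte entry
 * holds a 64-bit buffer address, a 32-bit length and 32-bit flags
 * (bit 0 = interrupt on completion).
 */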
462 static void intel_hda_parse_bdl(IntelHDAState *d, IntelHDAStream *st)
463 {
464 target_phys_addr_t addr;
465 uint8_t buf[16];
466 uint32_t i;
467
468 addr = intel_hda_addr(st->bdlp_lbase, st->bdlp_ubase);
469     st->bentries = st->lvi + 1;
470 g_free(st->bpl);
471 st->bpl = g_malloc(sizeof(bpl) * st->bentries);
472 for (i = 0; i < st->bentries; i++, addr += 16) {
473 cpu_physical_memory_read(addr, buf, 16);
474 st->bpl[i].addr = le64_to_cpu(*(uint64_t *)buf);
475 st->bpl[i].len = le32_to_cpu(*(uint32_t *)(buf + 8));
476 st->bpl[i].flags = le32_to_cpu(*(uint32_t *)(buf + 12));
477 dprint(d, 1, "bdl/%d: 0x%" PRIx64 " +0x%x, 0x%x\n",
478 i, st->bpl[i].addr, st->bpl[i].len, st->bpl[i].flags);
479 }
480
481 st->bsize = st->cbl;
482 st->lpib = 0;
483 st->be = 0;
484 st->bp = 0;
485 }
486
487 static void intel_hda_notify_codecs(IntelHDAState *d, uint32_t stream, bool running)
488 {
489 DeviceState *qdev;
490 HDACodecDevice *cdev;
491
492 QLIST_FOREACH(qdev, &d->codecs.qbus.children, sibling) {
493 cdev = DO_UPCAST(HDACodecDevice, qdev, qdev);
494 if (cdev->info->stream) {
495 cdev->info->stream(cdev, stream, running);
496 }
497 }
498 }
499
500 /* --------------------------------------------------------------------- */
501
502 static void intel_hda_set_g_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
503 {
504 if ((d->g_ctl & ICH6_GCTL_RESET) == 0) {
505 intel_hda_reset(&d->pci.qdev);
506 }
507 }
508
509 static void intel_hda_set_wake_en(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
510 {
511 intel_hda_update_irq(d);
512 }
513
514 static void intel_hda_set_state_sts(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
515 {
516 intel_hda_update_irq(d);
517 }
518
519 static void intel_hda_set_int_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
520 {
521 intel_hda_update_irq(d);
522 }
523
524 static void intel_hda_get_wall_clk(IntelHDAState *d, const IntelHDAReg *reg)
525 {
526 int64_t ns;
527
528 ns = qemu_get_clock_ns(vm_clock) - d->wall_base_ns;
529 d->wall_clk = (uint32_t)(ns * 24 / 1000); /* 24 MHz */
530 }
531
532 static void intel_hda_set_corb_wp(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
533 {
534 intel_hda_corb_run(d);
535 }
536
537 static void intel_hda_set_corb_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
538 {
539 intel_hda_corb_run(d);
540 }
541
542 static void intel_hda_set_rirb_wp(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
543 {
544 if (d->rirb_wp & ICH6_RIRBWP_RST) {
545 d->rirb_wp = 0;
546 }
547 }
548
549 static void intel_hda_set_rirb_sts(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
550 {
551 intel_hda_update_irq(d);
552
553 if ((old & ICH6_RBSTS_IRQ) && !(d->rirb_sts & ICH6_RBSTS_IRQ)) {
554 /* cleared ICH6_RBSTS_IRQ */
555 d->rirb_count = 0;
556 intel_hda_corb_run(d);
557 }
558 }
559
560 static void intel_hda_set_ics(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
561 {
562 if (d->ics & ICH6_IRS_BUSY) {
563 intel_hda_corb_run(d);
564 }
565 }
566
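/*
 * Write handler for the stream control/status registers: bit 0 resets the
 * stream, and a flip of the run bit (bit 1) parses the buffer descriptor
 * list and notifies the codecs that the stream started or stopped.
 */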
567 static void intel_hda_set_st_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
568 {
569 IntelHDAStream *st = d->st + reg->stream;
570
571 if (st->ctl & 0x01) {
572 /* reset */
573 dprint(d, 1, "st #%d: reset\n", reg->stream);
574 st->ctl = 0;
575 }
576 if ((st->ctl & 0x02) != (old & 0x02)) {
577 uint32_t stnr = (st->ctl >> 20) & 0x0f;
578 /* run bit flipped */
579 if (st->ctl & 0x02) {
580 /* start */
581 dprint(d, 1, "st #%d: start %d (ring buf %d bytes)\n",
582 reg->stream, stnr, st->cbl);
583 intel_hda_parse_bdl(d, st);
584 intel_hda_notify_codecs(d, stnr, true);
585 } else {
586 /* stop */
587 dprint(d, 1, "st #%d: stop %d\n", reg->stream, stnr);
588 intel_hda_notify_codecs(d, stnr, false);
589 }
590 }
591 intel_hda_update_irq(d);
592 }
593
594 /* --------------------------------------------------------------------- */
595
596 #define ST_REG(_n, _o) (0x80 + (_n) * 0x20 + (_o))
597
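/*
 * Register map, indexed by MMIO offset.  Each entry gives the access size,
 * reset value, write and write-1-to-clear masks, the backing field inside
 * IntelHDAState, and optional read/write side-effect handlers.
 */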
598 static const struct IntelHDAReg regtab[] = {
599 /* global */
600 [ ICH6_REG_GCAP ] = {
601 .name = "GCAP",
602 .size = 2,
603 .reset = 0x4401,
604 },
605 [ ICH6_REG_VMIN ] = {
606 .name = "VMIN",
607 .size = 1,
608 },
609 [ ICH6_REG_VMAJ ] = {
610 .name = "VMAJ",
611 .size = 1,
612 .reset = 1,
613 },
614 [ ICH6_REG_OUTPAY ] = {
615 .name = "OUTPAY",
616 .size = 2,
617 .reset = 0x3c,
618 },
619 [ ICH6_REG_INPAY ] = {
620 .name = "INPAY",
621 .size = 2,
622 .reset = 0x1d,
623 },
624 [ ICH6_REG_GCTL ] = {
625 .name = "GCTL",
626 .size = 4,
627 .wmask = 0x0103,
628 .offset = offsetof(IntelHDAState, g_ctl),
629 .whandler = intel_hda_set_g_ctl,
630 },
631 [ ICH6_REG_WAKEEN ] = {
632 .name = "WAKEEN",
633 .size = 2,
634 .wmask = 0x7fff,
635 .offset = offsetof(IntelHDAState, wake_en),
636 .whandler = intel_hda_set_wake_en,
637 },
638 [ ICH6_REG_STATESTS ] = {
639 .name = "STATESTS",
640 .size = 2,
641 .wmask = 0x7fff,
642 .wclear = 0x7fff,
643 .offset = offsetof(IntelHDAState, state_sts),
644 .whandler = intel_hda_set_state_sts,
645 },
646
647 /* interrupts */
648 [ ICH6_REG_INTCTL ] = {
649 .name = "INTCTL",
650 .size = 4,
651 .wmask = 0xc00000ff,
652 .offset = offsetof(IntelHDAState, int_ctl),
653 .whandler = intel_hda_set_int_ctl,
654 },
655 [ ICH6_REG_INTSTS ] = {
656 .name = "INTSTS",
657 .size = 4,
658 .wmask = 0xc00000ff,
659 .wclear = 0xc00000ff,
660 .offset = offsetof(IntelHDAState, int_sts),
661 },
662
663 /* misc */
664 [ ICH6_REG_WALLCLK ] = {
665 .name = "WALLCLK",
666 .size = 4,
667 .offset = offsetof(IntelHDAState, wall_clk),
668 .rhandler = intel_hda_get_wall_clk,
669 },
670 [ ICH6_REG_WALLCLK + 0x2000 ] = {
671 .name = "WALLCLK(alias)",
672 .size = 4,
673 .offset = offsetof(IntelHDAState, wall_clk),
674 .rhandler = intel_hda_get_wall_clk,
675 },
676
677 /* dma engine */
678 [ ICH6_REG_CORBLBASE ] = {
679 .name = "CORBLBASE",
680 .size = 4,
681 .wmask = 0xffffff80,
682 .offset = offsetof(IntelHDAState, corb_lbase),
683 },
684 [ ICH6_REG_CORBUBASE ] = {
685 .name = "CORBUBASE",
686 .size = 4,
687 .wmask = 0xffffffff,
688 .offset = offsetof(IntelHDAState, corb_ubase),
689 },
690 [ ICH6_REG_CORBWP ] = {
691 .name = "CORBWP",
692 .size = 2,
693 .wmask = 0xff,
694 .offset = offsetof(IntelHDAState, corb_wp),
695 .whandler = intel_hda_set_corb_wp,
696 },
697 [ ICH6_REG_CORBRP ] = {
698 .name = "CORBRP",
699 .size = 2,
700 .wmask = 0x80ff,
701 .offset = offsetof(IntelHDAState, corb_rp),
702 },
703 [ ICH6_REG_CORBCTL ] = {
704 .name = "CORBCTL",
705 .size = 1,
706 .wmask = 0x03,
707 .offset = offsetof(IntelHDAState, corb_ctl),
708 .whandler = intel_hda_set_corb_ctl,
709 },
710 [ ICH6_REG_CORBSTS ] = {
711 .name = "CORBSTS",
712 .size = 1,
713 .wmask = 0x01,
714 .wclear = 0x01,
715 .offset = offsetof(IntelHDAState, corb_sts),
716 },
717 [ ICH6_REG_CORBSIZE ] = {
718 .name = "CORBSIZE",
719 .size = 1,
720 .reset = 0x42,
721 .offset = offsetof(IntelHDAState, corb_size),
722 },
723 [ ICH6_REG_RIRBLBASE ] = {
724 .name = "RIRBLBASE",
725 .size = 4,
726 .wmask = 0xffffff80,
727 .offset = offsetof(IntelHDAState, rirb_lbase),
728 },
729 [ ICH6_REG_RIRBUBASE ] = {
730 .name = "RIRBUBASE",
731 .size = 4,
732 .wmask = 0xffffffff,
733 .offset = offsetof(IntelHDAState, rirb_ubase),
734 },
735 [ ICH6_REG_RIRBWP ] = {
736 .name = "RIRBWP",
737 .size = 2,
738 .wmask = 0x8000,
739 .offset = offsetof(IntelHDAState, rirb_wp),
740 .whandler = intel_hda_set_rirb_wp,
741 },
742 [ ICH6_REG_RINTCNT ] = {
743 .name = "RINTCNT",
744 .size = 2,
745 .wmask = 0xff,
746 .offset = offsetof(IntelHDAState, rirb_cnt),
747 },
748 [ ICH6_REG_RIRBCTL ] = {
749 .name = "RIRBCTL",
750 .size = 1,
751 .wmask = 0x07,
752 .offset = offsetof(IntelHDAState, rirb_ctl),
753 },
754 [ ICH6_REG_RIRBSTS ] = {
755 .name = "RIRBSTS",
756 .size = 1,
757 .wmask = 0x05,
758 .wclear = 0x05,
759 .offset = offsetof(IntelHDAState, rirb_sts),
760 .whandler = intel_hda_set_rirb_sts,
761 },
762 [ ICH6_REG_RIRBSIZE ] = {
763 .name = "RIRBSIZE",
764 .size = 1,
765 .reset = 0x42,
766 .offset = offsetof(IntelHDAState, rirb_size),
767 },
768
769 [ ICH6_REG_DPLBASE ] = {
770 .name = "DPLBASE",
771 .size = 4,
772 .wmask = 0xffffff81,
773 .offset = offsetof(IntelHDAState, dp_lbase),
774 },
775 [ ICH6_REG_DPUBASE ] = {
776 .name = "DPUBASE",
777 .size = 4,
778 .wmask = 0xffffffff,
779 .offset = offsetof(IntelHDAState, dp_ubase),
780 },
781
782 [ ICH6_REG_IC ] = {
783 .name = "ICW",
784 .size = 4,
785 .wmask = 0xffffffff,
786 .offset = offsetof(IntelHDAState, icw),
787 },
788 [ ICH6_REG_IR ] = {
789 .name = "IRR",
790 .size = 4,
791 .offset = offsetof(IntelHDAState, irr),
792 },
793 [ ICH6_REG_IRS ] = {
794 .name = "ICS",
795 .size = 2,
796 .wmask = 0x0003,
797 .wclear = 0x0002,
798 .offset = offsetof(IntelHDAState, ics),
799 .whandler = intel_hda_set_ics,
800 },
801
802 #define HDA_STREAM(_t, _i) \
803 [ ST_REG(_i, ICH6_REG_SD_CTL) ] = { \
804 .stream = _i, \
805 .name = _t stringify(_i) " CTL", \
806 .size = 4, \
807 .wmask = 0x1cff001f, \
808 .offset = offsetof(IntelHDAState, st[_i].ctl), \
809 .whandler = intel_hda_set_st_ctl, \
810 }, \
811 [ ST_REG(_i, ICH6_REG_SD_CTL) + 2] = { \
812 .stream = _i, \
813 .name = _t stringify(_i) " CTL(stnr)", \
814 .size = 1, \
815 .shift = 16, \
816 .wmask = 0x00ff0000, \
817 .offset = offsetof(IntelHDAState, st[_i].ctl), \
818 .whandler = intel_hda_set_st_ctl, \
819 }, \
820 [ ST_REG(_i, ICH6_REG_SD_STS)] = { \
821 .stream = _i, \
822 .name = _t stringify(_i) " CTL(sts)", \
823 .size = 1, \
824 .shift = 24, \
825 .wmask = 0x1c000000, \
826 .wclear = 0x1c000000, \
827 .offset = offsetof(IntelHDAState, st[_i].ctl), \
828 .whandler = intel_hda_set_st_ctl, \
829 }, \
830 [ ST_REG(_i, ICH6_REG_SD_LPIB) ] = { \
831 .stream = _i, \
832 .name = _t stringify(_i) " LPIB", \
833 .size = 4, \
834 .offset = offsetof(IntelHDAState, st[_i].lpib), \
835 }, \
836 [ ST_REG(_i, ICH6_REG_SD_LPIB) + 0x2000 ] = { \
837 .stream = _i, \
838 .name = _t stringify(_i) " LPIB(alias)", \
839 .size = 4, \
840 .offset = offsetof(IntelHDAState, st[_i].lpib), \
841 }, \
842 [ ST_REG(_i, ICH6_REG_SD_CBL) ] = { \
843 .stream = _i, \
844 .name = _t stringify(_i) " CBL", \
845 .size = 4, \
846 .wmask = 0xffffffff, \
847 .offset = offsetof(IntelHDAState, st[_i].cbl), \
848 }, \
849 [ ST_REG(_i, ICH6_REG_SD_LVI) ] = { \
850 .stream = _i, \
851 .name = _t stringify(_i) " LVI", \
852 .size = 2, \
853 .wmask = 0x00ff, \
854 .offset = offsetof(IntelHDAState, st[_i].lvi), \
855 }, \
856 [ ST_REG(_i, ICH6_REG_SD_FIFOSIZE) ] = { \
857 .stream = _i, \
858 .name = _t stringify(_i) " FIFOS", \
859 .size = 2, \
860 .reset = HDA_BUFFER_SIZE, \
861 }, \
862 [ ST_REG(_i, ICH6_REG_SD_FORMAT) ] = { \
863 .stream = _i, \
864 .name = _t stringify(_i) " FMT", \
865 .size = 2, \
866 .wmask = 0x7f7f, \
867 .offset = offsetof(IntelHDAState, st[_i].fmt), \
868 }, \
869 [ ST_REG(_i, ICH6_REG_SD_BDLPL) ] = { \
870 .stream = _i, \
871 .name = _t stringify(_i) " BDLPL", \
872 .size = 4, \
873 .wmask = 0xffffff80, \
874 .offset = offsetof(IntelHDAState, st[_i].bdlp_lbase), \
875 }, \
876 [ ST_REG(_i, ICH6_REG_SD_BDLPU) ] = { \
877 .stream = _i, \
878 .name = _t stringify(_i) " BDLPU", \
879 .size = 4, \
880 .wmask = 0xffffffff, \
881 .offset = offsetof(IntelHDAState, st[_i].bdlp_ubase), \
882 }, \
883
884 HDA_STREAM("IN", 0)
885 HDA_STREAM("IN", 1)
886 HDA_STREAM("IN", 2)
887 HDA_STREAM("IN", 3)
888
889 HDA_STREAM("OUT", 4)
890 HDA_STREAM("OUT", 5)
891 HDA_STREAM("OUT", 6)
892 HDA_STREAM("OUT", 7)
893
894 };
895
896 static const IntelHDAReg *intel_hda_reg_find(IntelHDAState *d, target_phys_addr_t addr)
897 {
898 const IntelHDAReg *reg;
899
900 if (addr >= sizeof(regtab)/sizeof(regtab[0])) {
901 goto noreg;
902 }
903 reg = regtab+addr;
904 if (reg->name == NULL) {
905 goto noreg;
906 }
907 return reg;
908
909 noreg:
910 dprint(d, 1, "unknown register, addr 0x%x\n", (int) addr);
911 return NULL;
912 }
913
914 static uint32_t *intel_hda_reg_addr(IntelHDAState *d, const IntelHDAReg *reg)
915 {
916 uint8_t *addr = (void*)d;
917
918 addr += reg->offset;
919 return (uint32_t*)addr;
920 }
921
922 static void intel_hda_reg_write(IntelHDAState *d, const IntelHDAReg *reg, uint32_t val,
923 uint32_t wmask)
924 {
925 uint32_t *addr;
926 uint32_t old;
927
928 if (!reg) {
929 return;
930 }
931
932 if (d->debug) {
933 time_t now = time(NULL);
934 if (d->last_write && d->last_reg == reg && d->last_val == val) {
935 d->repeat_count++;
936 if (d->last_sec != now) {
937 dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count);
938 d->last_sec = now;
939 d->repeat_count = 0;
940 }
941 } else {
942 if (d->repeat_count) {
943 dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count);
944 }
945 dprint(d, 2, "write %-16s: 0x%x (%x)\n", reg->name, val, wmask);
946 d->last_write = 1;
947 d->last_reg = reg;
948 d->last_val = val;
949 d->last_sec = now;
950 d->repeat_count = 0;
951 }
952 }
953 assert(reg->offset != 0);
954
955 addr = intel_hda_reg_addr(d, reg);
956 old = *addr;
957
958 if (reg->shift) {
959 val <<= reg->shift;
960 wmask <<= reg->shift;
961 }
962 wmask &= reg->wmask;
963 *addr &= ~wmask;
964 *addr |= wmask & val;
965 *addr &= ~(val & reg->wclear);
966
967 if (reg->whandler) {
968 reg->whandler(d, reg, old);
969 }
970 }
971
972 static uint32_t intel_hda_reg_read(IntelHDAState *d, const IntelHDAReg *reg,
973 uint32_t rmask)
974 {
975 uint32_t *addr, ret;
976
977 if (!reg) {
978 return 0;
979 }
980
981 if (reg->rhandler) {
982 reg->rhandler(d, reg);
983 }
984
985 if (reg->offset == 0) {
986 /* constant read-only register */
987 ret = reg->reset;
988 } else {
989 addr = intel_hda_reg_addr(d, reg);
990 ret = *addr;
991 if (reg->shift) {
992 ret >>= reg->shift;
993 }
994 ret &= rmask;
995 }
996 if (d->debug) {
997 time_t now = time(NULL);
998 if (!d->last_write && d->last_reg == reg && d->last_val == ret) {
999 d->repeat_count++;
1000 if (d->last_sec != now) {
1001 dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count);
1002 d->last_sec = now;
1003 d->repeat_count = 0;
1004 }
1005 } else {
1006 if (d->repeat_count) {
1007 dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count);
1008 }
1009 dprint(d, 2, "read %-16s: 0x%x (%x)\n", reg->name, ret, rmask);
1010 d->last_write = 0;
1011 d->last_reg = reg;
1012 d->last_val = ret;
1013 d->last_sec = now;
1014 d->repeat_count = 0;
1015 }
1016 }
1017 return ret;
1018 }
1019
1020 static void intel_hda_regs_reset(IntelHDAState *d)
1021 {
1022 uint32_t *addr;
1023 int i;
1024
1025 for (i = 0; i < sizeof(regtab)/sizeof(regtab[0]); i++) {
1026 if (regtab[i].name == NULL) {
1027 continue;
1028 }
1029 if (regtab[i].offset == 0) {
1030 continue;
1031 }
1032 addr = intel_hda_reg_addr(d, regtab + i);
1033 *addr = regtab[i].reset;
1034 }
1035 }
1036
1037 /* --------------------------------------------------------------------- */
1038
1039 static void intel_hda_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1040 {
1041 IntelHDAState *d = opaque;
1042 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1043
1044 intel_hda_reg_write(d, reg, val, 0xff);
1045 }
1046
1047 static void intel_hda_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1048 {
1049 IntelHDAState *d = opaque;
1050 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1051
1052 intel_hda_reg_write(d, reg, val, 0xffff);
1053 }
1054
1055 static void intel_hda_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1056 {
1057 IntelHDAState *d = opaque;
1058 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1059
1060 intel_hda_reg_write(d, reg, val, 0xffffffff);
1061 }
1062
1063 static uint32_t intel_hda_mmio_readb(void *opaque, target_phys_addr_t addr)
1064 {
1065 IntelHDAState *d = opaque;
1066 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1067
1068 return intel_hda_reg_read(d, reg, 0xff);
1069 }
1070
1071 static uint32_t intel_hda_mmio_readw(void *opaque, target_phys_addr_t addr)
1072 {
1073 IntelHDAState *d = opaque;
1074 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1075
1076 return intel_hda_reg_read(d, reg, 0xffff);
1077 }
1078
1079 static uint32_t intel_hda_mmio_readl(void *opaque, target_phys_addr_t addr)
1080 {
1081 IntelHDAState *d = opaque;
1082 const IntelHDAReg *reg = intel_hda_reg_find(d, addr);
1083
1084 return intel_hda_reg_read(d, reg, 0xffffffff);
1085 }
1086
1087 static const MemoryRegionOps intel_hda_mmio_ops = {
1088 .old_mmio = {
1089 .read = {
1090 intel_hda_mmio_readb,
1091 intel_hda_mmio_readw,
1092 intel_hda_mmio_readl,
1093 },
1094 .write = {
1095 intel_hda_mmio_writeb,
1096 intel_hda_mmio_writew,
1097 intel_hda_mmio_writel,
1098 },
1099 },
1100 .endianness = DEVICE_NATIVE_ENDIAN,
1101 };
1102
1103 /* --------------------------------------------------------------------- */
1104
1105 static void intel_hda_reset(DeviceState *dev)
1106 {
1107 IntelHDAState *d = DO_UPCAST(IntelHDAState, pci.qdev, dev);
1108 DeviceState *qdev;
1109 HDACodecDevice *cdev;
1110
1111 intel_hda_regs_reset(d);
1112 d->wall_base_ns = qemu_get_clock_ns(vm_clock);
1113
1114 /* reset codecs */
1115 QLIST_FOREACH(qdev, &d->codecs.qbus.children, sibling) {
1116 cdev = DO_UPCAST(HDACodecDevice, qdev, qdev);
1117 if (qdev->info->reset) {
1118 qdev->info->reset(qdev);
1119 }
1120 d->state_sts |= (1 << cdev->cad);
1121 }
1122 intel_hda_update_irq(d);
1123 }
1124
1125 static int intel_hda_init(PCIDevice *pci)
1126 {
1127 IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);
1128 uint8_t *conf = d->pci.config;
1129
1130 d->name = d->pci.qdev.info->name;
1131
1132 pci_config_set_interrupt_pin(conf, 1);
1133
1134     /* HDCTL (offset 0x40) bit 0 selects the signaling mode (1 = HDA, 0 = AC'97), see spec 18.1.19 */
1135 conf[0x40] = 0x01;
1136
1137 memory_region_init_io(&d->mmio, &intel_hda_mmio_ops, d,
1138 "intel-hda", 0x4000);
1139 pci_register_bar(&d->pci, 0, 0, &d->mmio);
1140 if (d->msi) {
1141 msi_init(&d->pci, 0x50, 1, true, false);
1142 }
1143
1144 hda_codec_bus_init(&d->pci.qdev, &d->codecs,
1145 intel_hda_response, intel_hda_xfer);
1146
1147 return 0;
1148 }
1149
1150 static int intel_hda_exit(PCIDevice *pci)
1151 {
1152 IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);
1153
1154 msi_uninit(&d->pci);
1155 memory_region_destroy(&d->mmio);
1156 return 0;
1157 }
1158
1159 static void intel_hda_write_config(PCIDevice *pci, uint32_t addr,
1160 uint32_t val, int len)
1161 {
1162 IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);
1163
1164 pci_default_write_config(pci, addr, val, len);
1165 if (d->msi) {
1166 msi_write_config(pci, addr, val, len);
1167 }
1168 }
1169
1170 static int intel_hda_post_load(void *opaque, int version)
1171 {
1172 IntelHDAState* d = opaque;
1173 int i;
1174
1175 dprint(d, 1, "%s\n", __FUNCTION__);
1176 for (i = 0; i < ARRAY_SIZE(d->st); i++) {
1177 if (d->st[i].ctl & 0x02) {
1178 intel_hda_parse_bdl(d, &d->st[i]);
1179 }
1180 }
1181 intel_hda_update_irq(d);
1182 return 0;
1183 }
1184
1185 static const VMStateDescription vmstate_intel_hda_stream = {
1186 .name = "intel-hda-stream",
1187 .version_id = 1,
1188 .fields = (VMStateField []) {
1189 VMSTATE_UINT32(ctl, IntelHDAStream),
1190 VMSTATE_UINT32(lpib, IntelHDAStream),
1191 VMSTATE_UINT32(cbl, IntelHDAStream),
1192 VMSTATE_UINT32(lvi, IntelHDAStream),
1193 VMSTATE_UINT32(fmt, IntelHDAStream),
1194 VMSTATE_UINT32(bdlp_lbase, IntelHDAStream),
1195 VMSTATE_UINT32(bdlp_ubase, IntelHDAStream),
1196 VMSTATE_END_OF_LIST()
1197 }
1198 };
1199
1200 static const VMStateDescription vmstate_intel_hda = {
1201 .name = "intel-hda",
1202 .version_id = 1,
1203 .post_load = intel_hda_post_load,
1204 .fields = (VMStateField []) {
1205 VMSTATE_PCI_DEVICE(pci, IntelHDAState),
1206
1207 /* registers */
1208 VMSTATE_UINT32(g_ctl, IntelHDAState),
1209 VMSTATE_UINT32(wake_en, IntelHDAState),
1210 VMSTATE_UINT32(state_sts, IntelHDAState),
1211 VMSTATE_UINT32(int_ctl, IntelHDAState),
1212 VMSTATE_UINT32(int_sts, IntelHDAState),
1213 VMSTATE_UINT32(wall_clk, IntelHDAState),
1214 VMSTATE_UINT32(corb_lbase, IntelHDAState),
1215 VMSTATE_UINT32(corb_ubase, IntelHDAState),
1216 VMSTATE_UINT32(corb_rp, IntelHDAState),
1217 VMSTATE_UINT32(corb_wp, IntelHDAState),
1218 VMSTATE_UINT32(corb_ctl, IntelHDAState),
1219 VMSTATE_UINT32(corb_sts, IntelHDAState),
1220 VMSTATE_UINT32(corb_size, IntelHDAState),
1221 VMSTATE_UINT32(rirb_lbase, IntelHDAState),
1222 VMSTATE_UINT32(rirb_ubase, IntelHDAState),
1223 VMSTATE_UINT32(rirb_wp, IntelHDAState),
1224 VMSTATE_UINT32(rirb_cnt, IntelHDAState),
1225 VMSTATE_UINT32(rirb_ctl, IntelHDAState),
1226 VMSTATE_UINT32(rirb_sts, IntelHDAState),
1227 VMSTATE_UINT32(rirb_size, IntelHDAState),
1228 VMSTATE_UINT32(dp_lbase, IntelHDAState),
1229 VMSTATE_UINT32(dp_ubase, IntelHDAState),
1230 VMSTATE_UINT32(icw, IntelHDAState),
1231 VMSTATE_UINT32(irr, IntelHDAState),
1232 VMSTATE_UINT32(ics, IntelHDAState),
1233 VMSTATE_STRUCT_ARRAY(st, IntelHDAState, 8, 0,
1234 vmstate_intel_hda_stream,
1235 IntelHDAStream),
1236
1237 /* additional state info */
1238 VMSTATE_UINT32(rirb_count, IntelHDAState),
1239 VMSTATE_INT64(wall_base_ns, IntelHDAState),
1240
1241 VMSTATE_END_OF_LIST()
1242 }
1243 };
1244
1245 static PCIDeviceInfo intel_hda_info = {
1246 .qdev.name = "intel-hda",
1247 .qdev.desc = "Intel HD Audio Controller",
1248 .qdev.size = sizeof(IntelHDAState),
1249 .qdev.vmsd = &vmstate_intel_hda,
1250 .qdev.reset = intel_hda_reset,
1251 .init = intel_hda_init,
1252 .exit = intel_hda_exit,
1253 .config_write = intel_hda_write_config,
1254 .vendor_id = PCI_VENDOR_ID_INTEL,
1255 .device_id = 0x2668,
1256 .revision = 1,
1257 .class_id = PCI_CLASS_MULTIMEDIA_HD_AUDIO,
1258 .qdev.props = (Property[]) {
1259 DEFINE_PROP_UINT32("debug", IntelHDAState, debug, 0),
1260 DEFINE_PROP_UINT32("msi", IntelHDAState, msi, 1),
1261 DEFINE_PROP_END_OF_LIST(),
1262 }
1263 };
1264
1265 static void intel_hda_register(void)
1266 {
1267 pci_qdev_register(&intel_hda_info);
1268 }
1269 device_init(intel_hda_register);
1270
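/*
 * For reference, the explicit command line form of the convenience helper
 * below is expected to be "-device intel-hda -device hda-duplex", using the
 * hda-duplex codec device created by the code underneath.
 */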
1271 /*
1272  * create an intel hda controller with a codec attached to it,
1273  * so '-soundhw hda' works.
1274 */
1275 int intel_hda_and_codec_init(PCIBus *bus)
1276 {
1277 PCIDevice *controller;
1278 BusState *hdabus;
1279 DeviceState *codec;
1280
1281 controller = pci_create_simple(bus, -1, "intel-hda");
1282 hdabus = QLIST_FIRST(&controller->qdev.child_bus);
1283 codec = qdev_create(hdabus, "hda-duplex");
1284 qdev_init_nofail(codec);
1285 return 0;
1286 }
1287