/*
 * hw/xen/xen_pt_graphics.c
 *
 * graphics passthrough
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "xen_pt.h"
#include "xen-host-pci-device.h"

static unsigned long igd_guest_opregion;
static unsigned long igd_host_opregion;

#define XEN_PCI_INTEL_OPREGION_MASK 0xfff

typedef struct VGARegion {
    int type;           /* Memory or port I/O */
    uint64_t guest_base_addr;
    uint64_t machine_base_addr;
    uint64_t size;      /* size of the region */
    int rc;
} VGARegion;

#define IORESOURCE_IO           0x00000100
#define IORESOURCE_MEM          0x00000200

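/*
 * Legacy VGA resources that are forwarded 1:1 to the guest: the
 * MDA/Hercules I/O ports at 0x3B0-0x3BB, the VGA I/O ports at 0x3C0-0x3DF,
 * and the 128 KiB legacy framebuffer window at 0xA0000.  I/O base/size are
 * in ports; memory base/size are in XC_PAGE_SHIFT-sized pages.
 */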
static struct VGARegion vga_args[] = {
    {
        .type = IORESOURCE_IO,
        .guest_base_addr = 0x3B0,
        .machine_base_addr = 0x3B0,
        .size = 0xC,
        .rc = -1,
    },
    {
        .type = IORESOURCE_IO,
        .guest_base_addr = 0x3C0,
        .machine_base_addr = 0x3C0,
        .size = 0x20,
        .rc = -1,
    },
    {
        .type = IORESOURCE_MEM,
        .guest_base_addr = 0xa0000 >> XC_PAGE_SHIFT,
        .machine_base_addr = 0xa0000 >> XC_PAGE_SHIFT,
        .size = 0x20,
        .rc = -1,
    },
};

/*
 * register VGA resources for the domain with assigned gfx
 */
int xen_pt_register_vga_regions(XenHostPCIDevice *dev)
{
    int i = 0;

    if (!is_igd_vga_passthrough(dev)) {
        return 0;
    }

    for (i = 0 ; i < ARRAY_SIZE(vga_args); i++) {
        if (vga_args[i].type == IORESOURCE_IO) {
            vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                            vga_args[i].guest_base_addr,
                            vga_args[i].machine_base_addr,
                            vga_args[i].size, DPCI_ADD_MAPPING);
        } else {
            vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                            vga_args[i].guest_base_addr,
                            vga_args[i].machine_base_addr,
                            vga_args[i].size, DPCI_ADD_MAPPING);
        }

        if (vga_args[i].rc) {
            XEN_PT_ERR(NULL, "VGA %s mapping failed! (rc: %i)\n",
                    vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory",
                    vga_args[i].rc);
            return vga_args[i].rc;
        }
    }

    return 0;
}

/*
 * unregister VGA resources for the domain with assigned gfx
 */
int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev)
{
    int i = 0;
    int ret = 0;

    if (!is_igd_vga_passthrough(dev)) {
        return 0;
    }

    for (i = 0 ; i < ARRAY_SIZE(vga_args); i++) {
        if (vga_args[i].type == IORESOURCE_IO) {
            vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                            vga_args[i].guest_base_addr,
                            vga_args[i].machine_base_addr,
                            vga_args[i].size, DPCI_REMOVE_MAPPING);
        } else {
            vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                            vga_args[i].guest_base_addr,
                            vga_args[i].machine_base_addr,
                            vga_args[i].size, DPCI_REMOVE_MAPPING);
        }

        if (vga_args[i].rc) {
            XEN_PT_ERR(NULL, "VGA %s unmapping failed! (rc: %i)\n",
                    vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory",
                    vga_args[i].rc);
            return vga_args[i].rc;
        }
    }

    if (igd_guest_opregion) {
        ret = xc_domain_memory_mapping(xen_xc, xen_domid,
                (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
                (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
                3,
                DPCI_REMOVE_MAPPING);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

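/*
 * Illustrative sketch only (not part of this file's logic): callers are
 * expected to pair the two helpers above around the lifetime of the
 * assigned device, roughly:
 *
 *     if (xen_pt_register_vga_regions(host_dev)) {   // during device setup
 *         ...fail the setup...
 *     }
 *     ...
 *     xen_pt_unregister_vga_regions(host_dev);       // during teardown
 *
 * where "host_dev" stands for the probed XenHostPCIDevice of the IGD.
 */
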
static void *get_vgabios(XenPCIPassthroughState *s, int *size,
                         XenHostPCIDevice *dev)
{
    return pci_assign_dev_load_option_rom(&s->dev, size,
                                          dev->domain, dev->bus,
                                          dev->dev, dev->func);
}

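/*
 * Layout notes (PCI Firmware spec): an option ROM image starts with the
 * 0xAA55 signature, a size field counted in 512-byte blocks, and a
 * pointer (pcioffset, at byte 0x18) to the "PCIR" PCI data structure,
 * which carries the vendor/device IDs patched further below.
 */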
/* Refer to SeaBIOS. */
struct rom_header {
    uint16_t signature;
    uint8_t size;
    uint8_t initVector[4];
    uint8_t reserved[17];
    uint16_t pcioffset;
    uint16_t pnpoffset;
} __attribute__((packed));

struct pci_data {
    uint32_t signature;
    uint16_t vendor;
    uint16_t device;
    uint16_t vitaldata;
    uint16_t dlen;
    uint8_t drevision;
    uint8_t class_lo;
    uint16_t class_hi;
    uint16_t ilen;
    uint16_t irevision;
    uint8_t type;
    uint8_t indicator;
    uint16_t reserved;
} __attribute__((packed));

void xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev,
                      Error **errp)
{
    unsigned char *bios = NULL;
    struct rom_header *rom;
    int bios_size;
    char *c = NULL;
    char checksum = 0;
    uint32_t len = 0;
    struct pci_data *pd = NULL;

    if (!is_igd_vga_passthrough(dev)) {
        error_setg(errp, "Need to enable igd-passthrough");
        return;
    }

    bios = get_vgabios(s, &bios_size, dev);
    if (!bios) {
        error_setg(errp, "VGA: Can't get VBIOS");
        return;
    }

    if (bios_size < sizeof(struct rom_header)) {
        error_setg(errp, "VGA: VBIOS image corrupt (too small)");
        return;
    }

    /* Currently we treat this image as the primary VGA BIOS. */
    rom = (struct rom_header *)bios;

    if (rom->pcioffset + sizeof(struct pci_data) > bios_size) {
        error_setg(errp, "VGA: VBIOS image corrupt (bad pcioffset field)");
        return;
    }

    pd = (void *)(bios + rom->pcioffset);

    /* We may need to fix up the Device Identification. */
    if (pd->device != s->real_device.device_id) {
        pd->device = s->real_device.device_id;

        len = rom->size * 512;
        if (len > bios_size) {
            error_setg(errp, "VGA: VBIOS image corrupt (bad size field)");
            return;
        }

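        /*
         * All bytes of an option ROM image must sum to zero modulo 256;
         * patching the device ID above breaks that invariant, so fold the
         * correction into the image's last byte.
         */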
        /* Then adjust the BIOS checksum */
        for (c = (char *)bios; c < ((char *)bios + len); c++) {
            checksum += *c;
        }
        if (checksum) {
            bios[len - 1] -= checksum;
            XEN_PT_LOG(&s->dev, "VGA BIOS checksum adjusted (residual 0x%x)!\n",
                       checksum);
        }
    }

    /* The legacy BIOS expects the primary VGA option ROM at 0xC0000. */
    cpu_physical_memory_write(0xc0000, bios, bios_size);
}

uint32_t igd_read_opregion(XenPCIPassthroughState *s)
{
    uint32_t val = 0;

    if (!igd_guest_opregion) {
        return val;
    }

    val = igd_guest_opregion;

    XEN_PT_LOG(&s->dev, "Read opregion val=%x\n", val);
    return val;
}

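/*
 * The IGD OpRegion is a memory area shared between system firmware and the
 * graphics driver.  The guest writes the guest-physical address it wants to
 * use into the emulated register; we read the host address of the real
 * OpRegion from the device's config space and map the guest pages onto it.
 */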
#define XEN_PCI_INTEL_OPREGION_PAGES 0x3
#define XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED 0x1
void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val)
{
    int ret;

    if (igd_guest_opregion) {
        XEN_PT_LOG(&s->dev,
                   "opregion register has already been set, ignoring %x\n",
                   val);
        return;
    }

    /* Raw 4-byte read; this only works on a little-endian host. */
    xen_host_pci_get_block(&s->real_device, XEN_PCI_INTEL_OPREGION,
                           (uint8_t *)&igd_host_opregion, 4);
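
    /*
     * Combine the page-aligned address chosen by the guest with the low
     * (page-offset) bits of the host OpRegion address, so the OpRegion
     * keeps the same offset within its page.
     */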
    igd_guest_opregion = (unsigned long)(val & ~XEN_PCI_INTEL_OPREGION_MASK)
                            | (igd_host_opregion & XEN_PCI_INTEL_OPREGION_MASK);

    ret = xc_domain_iomem_permission(xen_xc, xen_domid,
            (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
            XEN_PCI_INTEL_OPREGION_PAGES,
            XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED);

    if (ret) {
        XEN_PT_ERR(&s->dev, "[%d]: Can't enable access to IGD host opregion:"
                    " 0x%lx.\n", ret,
                    (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT));
        igd_guest_opregion = 0;
        return;
    }

    ret = xc_domain_memory_mapping(xen_xc, xen_domid,
            (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT),
            (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
            XEN_PCI_INTEL_OPREGION_PAGES,
            DPCI_ADD_MAPPING);

    if (ret) {
        XEN_PT_ERR(&s->dev, "[%d]: Can't map IGD host opregion: 0x%lx to"
                    " guest opregion: 0x%lx.\n", ret,
                    (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
                    (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
        igd_guest_opregion = 0;
        return;
    }

    XEN_PT_LOG(&s->dev, "Map OpRegion: 0x%lx -> 0x%lx\n",
               (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT),
               (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT));
}

typedef struct {
    uint16_t gpu_device_id;
    uint16_t pch_device_id;
    uint8_t pch_revision_id;
} IGDDeviceIDInfo;

/*
 * In the real world different GPUs are paired with different PCHs, but the
 * various PCH device IDs mostly correspond to different SKUs of the same
 * silicon design (the same holds for the GPU device IDs): features are
 * turned on or off with fuses, while the software interfaces stay
 * consistent across all SKUs of a given family (e.g. LPT), even though not
 * every feature is available on every SKU.
 *
 * Most of these PCH differences do not matter to the graphics driver; the
 * display port wiring obviously differs, but for passthrough any PCH of
 * the right family is good enough.
 *
 * So we currently expose a single PCH device ID per family: 0x8c4e for all
 * HSW (Haswell) scenarios and 0x9cc3 for BDW (Broadwell).
 */
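/*
 * Each entry: { GPU device ID, faked PCH (ISA bridge) device ID,
 *               PCH revision ID }.
 */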
static const IGDDeviceIDInfo igd_combo_id_infos[] = {
    /* HSW Classic */
    {0x0402, 0x8c4e, 0x04}, /* HSWGT1D, HSWD_w7 */
    {0x0406, 0x8c4e, 0x04}, /* HSWGT1M, HSWM_w7 */
    {0x0412, 0x8c4e, 0x04}, /* HSWGT2D, HSWD_w7 */
    {0x0416, 0x8c4e, 0x04}, /* HSWGT2M, HSWM_w7 */
    {0x041E, 0x8c4e, 0x04}, /* HSWGT15D, HSWD_w7 */
    /* HSW ULT */
    {0x0A06, 0x8c4e, 0x04}, /* HSWGT1UT, HSWM_w7 */
    {0x0A16, 0x8c4e, 0x04}, /* HSWGT2UT, HSWM_w7 */
    {0x0A26, 0x8c4e, 0x06}, /* HSWGT3UT, HSWM_w7 */
    {0x0A2E, 0x8c4e, 0x04}, /* HSWGT3UT28W, HSWM_w7 */
    {0x0A1E, 0x8c4e, 0x04}, /* HSWGT2UX, HSWM_w7 */
    {0x0A0E, 0x8c4e, 0x04}, /* HSWGT1ULX, HSWM_w7 */
    /* HSW CRW */
    {0x0D26, 0x8c4e, 0x04}, /* HSWGT3CW, HSWM_w7 */
    {0x0D22, 0x8c4e, 0x04}, /* HSWGT3CWDT, HSWD_w7 */
    /* HSW Server */
    {0x041A, 0x8c4e, 0x04}, /* HSWSVGT2, HSWD_w7 */
    /* HSW SRVR */
    {0x040A, 0x8c4e, 0x04}, /* HSWSVGT1, HSWD_w7 */
    /* BDW */
    {0x1606, 0x9cc3, 0x03}, /* BDWULTGT1, BDWM_w7 */
    {0x1616, 0x9cc3, 0x03}, /* BDWULTGT2, BDWM_w7 */
    {0x1626, 0x9cc3, 0x03}, /* BDWULTGT3, BDWM_w7 */
    {0x160E, 0x9cc3, 0x03}, /* BDWULXGT1, BDWM_w7 */
    {0x161E, 0x9cc3, 0x03}, /* BDWULXGT2, BDWM_w7 */
    {0x1602, 0x9cc3, 0x03}, /* BDWHALOGT1, BDWM_w7 */
    {0x1612, 0x9cc3, 0x03}, /* BDWHALOGT2, BDWM_w7 */
    {0x1622, 0x9cc3, 0x03}, /* BDWHALOGT3, BDWM_w7 */
    {0x162B, 0x9cc3, 0x03}, /* BDWHALO28W, BDWM_w7 */
    {0x162A, 0x9cc3, 0x03}, /* BDWGT3WRKS, BDWM_w7 */
    {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */
};

static void isa_bridge_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    dc->desc = "ISA bridge faked to support IGD PT";
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->class_id = PCI_CLASS_BRIDGE_ISA;
}

static const TypeInfo isa_bridge_info = {
    .name          = "igd-passthrough-isa-bridge",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .class_init    = isa_bridge_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pt_graphics_register_types(void)
{
    type_register_static(&isa_bridge_info);
}
type_init(pt_graphics_register_types)

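/*
 * The guest's Intel graphics driver identifies the platform partly by
 * reading the device/revision IDs of the PCH ISA bridge at 00:1f.0, so a
 * bridge with matching IDs has to be made visible to the guest.
 */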
void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
                                           XenHostPCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(&s->dev);
    struct PCIDevice *bridge_dev;
    int i, num;
    const uint16_t gpu_dev_id = dev->device_id;
    uint16_t pch_dev_id = 0xffff;
    uint8_t pch_rev_id = 0;

    num = ARRAY_SIZE(igd_combo_id_infos);
    for (i = 0; i < num; i++) {
        if (gpu_dev_id == igd_combo_id_infos[i].gpu_device_id) {
            pch_dev_id = igd_combo_id_infos[i].pch_device_id;
            pch_rev_id = igd_combo_id_infos[i].pch_revision_id;
        }
    }

    if (pch_dev_id == 0xffff) {
        return;
    }

    /* Currently IGD drivers always need to access the PCH at 00:1f.0. */
    bridge_dev = pci_create_simple(bus, PCI_DEVFN(0x1f, 0),
                                   "igd-passthrough-isa-bridge");

    /*
     * Note that the vendor id is always PCI_VENDOR_ID_INTEL.
     */
    if (!bridge_dev) {
        fprintf(stderr, "failed to create igd-passthrough-isa-bridge!\n");
        return;
    }
    pci_config_set_device_id(bridge_dev->config, pch_dev_id);
    pci_config_set_revision(bridge_dev->config, pch_rev_id);
}