/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

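/*
 * Look up a TCE table by its LIOBN.  Valid LIOBNs are 32 bits wide, so
 * anything with the upper bits set is rejected outright.
 */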
sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

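/* Convert the permission bits of a TCE entry into IOMMU access flags */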
static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}

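/*
 * Allocate the backing store for a TCE table.  Under KVM we first try to
 * create an in-kernel accelerated table; if that fails (or KVM is not in
 * use) we fall back to a plain userspace allocation.
 */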
static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled()) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

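/* Free a TCE table, undoing either the in-kernel or userspace allocation */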
static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}

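/*
 * Translate an address on the device bus into a guest physical address
 * and access permissions.  Accesses outside the enabled window return an
 * IOMMU_NONE entry.
 */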
/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
                                               hwaddr addr,
                                               IOMMUAccessFlags flag)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

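/* Expose the live table to the migration stream via the mig_* fields */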
static void spapr_tce_table_pre_save(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);
}

static uint64_t spapr_tce_get_min_page_size(IOMMUMemoryRegion *iommu)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

    return 1ULL << tcet->page_shift;
}

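/*
 * Switch the table into (or out of) VFIO mode as the first IOMMU
 * notifier is registered or the last one is removed.
 */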
static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}

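/*
 * Rebuild the table after migration: re-create the window if its
 * geometry changed, then copy the migrated entries into the live table.
 */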
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        /* mig_table was allocated by vmstate with g_malloc, not malloc */
        g_free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}

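/*
 * The "ex" subsection is only migrated for tables with a non-default
 * geometry, i.e. a non-zero bus offset or a page shift other than 12
 * (4K pages).
 */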
static bool spapr_tce_table_ex_needed(void *opaque)
{
    sPAPRTCETable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}

static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, sPAPRTCETable),
        VMSTATE_UINT32(page_shift, sPAPRTCETable),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable, NULL),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, sPAPRTCETable),
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};

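/*
 * Create the memory regions for the table and register it for migration.
 * The IOMMU region starts out with size 0 and is only mapped into the
 * root region once the table is enabled.
 */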
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    char tmp[32];

    tcet->fd = -1;
    tcet->need_vfio = false;
    snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);

    snprintf(tmp, sizeof(tmp), "tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, sizeof(tcet->iommu),
                             TYPE_SPAPR_IOMMU_MEMORY_REGION,
                             tcetobj, tmp, 0);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

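/*
 * VFIO needs the table in userspace memory; if it currently lives in the
 * kernel, copy the entries out and drop the KVM acceleration fd.
 */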
void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    void *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * TCEs yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to be done */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}

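/*
 * Create a new TCE table object and attach it as a child of its owner.
 * The table starts out disabled; spapr_tce_table_enable() gives it a
 * DMA window.
 */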
sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    sPAPRTCETable *tcet;
    char tmp[32];

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

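/*
 * Set up the DMA window: allocate the backing table and map the IOMMU
 * region into the root region at the requested bus offset.
 */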
void spapr_tce_table_enable(sPAPRTCETable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        error_report("Warning: trying to enable an already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(MEMORY_REGION(&tcet->iommu),
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset,
                                MEMORY_REGION(&tcet->iommu));
}

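/* Tear down the DMA window and free the backing table */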
void spapr_tce_table_disable(sPAPRTCETable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, MEMORY_REGION(&tcet->iommu));
    memory_region_set_size(MEMORY_REGION(&tcet->iommu), 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}

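/*
 * Note that this returns the root region rather than the IOMMU region
 * itself, so the result stays valid across enable/disable cycles of the
 * window.
 */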
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->root;
}

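/* On reset, clear every entry so all accesses fault until remapped */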
static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}

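/* Update a single TCE entry and notify any registered IOMMU notifiers */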
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

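/*
 * H_PUT_TCE_INDIRECT: update up to 512 TCEs from a guest page holding a
 * list of entries; the list address must be page aligned.
 */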
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}

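/*
 * H_STUFF_TCE: set npages consecutive TCEs to the same value, typically
 * used to clear a range of mappings.
 */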
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

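/* H_PUT_TCE: update a single TCE */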
static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

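/* Read back a single TCE, bounds-checking the IOBA first */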
static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

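/* H_GET_TCE: read a single TCE back into args[0] */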
static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}

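/*
 * Encode a DMA window property (e.g. "ibm,my-dma-window"): the LIOBN
 * followed by a 64-bit window address and a 64-bit window size, plus the
 * matching #dma-address-cells and #dma-size-cells properties.
 */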
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

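/* Convenience wrapper deriving the window size from the table geometry */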
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

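/*
 * The TCE hypercalls are registered from class_init so they are
 * installed exactly once, no matter how many tables are created.
 */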
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static const TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};

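/*
 * Hook the TCE callbacks into IOMMUMemoryRegionClass so the generic
 * memory code can translate accesses through the table.
 */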
static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = spapr_tce_translate_iommu;
    imrc->get_min_page_size = spapr_tce_get_min_page_size;
    imrc->notify_flag_changed = spapr_tce_notify_flag_changed;
}

static const TypeInfo spapr_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SPAPR_IOMMU_MEMORY_REGION,
    .class_init = spapr_iommu_memory_region_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
    type_register_static(&spapr_iommu_memory_region_info);
}

type_init(register_types);