/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

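/*
 * GERROR/GERRORN form a toggle-based acknowledge protocol: an error is
 * "pending" while the two registers disagree on its bit.  The guest
 * acknowledges an error by writing GERRORN so that the bit matches
 * GERROR again.
 */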
void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to errors that are not pending (CONSTRAINED
     * UNPREDICTABLE).
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

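/*
 * Queue helpers: SMMUv3 queues are circular buffers in guest memory.  PROD
 * and CONS each carry an index plus a wrap bit above it, so "empty" means
 * index and wrap both match, while "full" means the indexes match but the
 * wrap bits differ.  As an illustration (a sketch, not the exact macro
 * definitions), the slot address yielded by Q_CONS_ENTRY()/Q_PROD_ENTRY()
 * is roughly:
 *
 *   addr = Q_BASE(q) + (index & ((1 << q->log2size) - 1)) * q->entry_size;
 */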
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

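/*
 * Write an event record to the event queue and pulse the EVTQ IRQ once
 * the record has landed and the queue is non-empty.  Writes are silently
 * dropped when the queue is disabled or full.
 */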
void smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    if (smmuv3_q_full(q)) {
        return;
    }

    queue_write(q, evt);

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transactions always abort and return an error */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

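    /* bits [4:0] of the queue base registers hold the LOG2SIZE field */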
    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

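/*
 * Drain the command queue: fetch the entry at CONS, dispatch on the opcode
 * and advance CONS, stopping at the first error, which is latched into
 * CMDQ.CONS.ERR and signalled via GERROR.CMDQ_ERR.  In the complete device
 * this is expected to run when the guest updates CMDQ_PROD or re-enables
 * the queue (the MMIO write path is still a stub at this stage).
 */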
int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * Some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands.
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    /* not yet implemented */
    return MEMTX_ERROR;
}

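/*
 * 64-bit registers (GERROR_IRQ_CFG0, STRTAB_BASE and the queue base
 * registers) can be read with a single 64-bit access, handled here, or
 * as two 32-bit halves, handled by smmu_readl() below.
 */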
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

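/*
 * 32-bit register reads.  The "+ 4" cases below return the high half of
 * the 64-bit registers, so a guest using pairs of 32-bit accesses sees
 * the same values as one using 64-bit accesses.
 */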
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

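/*
 * The register file spans two 64KB pages (0x20000 bytes); .valid restricts
 * guest accesses to 32-bit or 64-bit, and matching .impl sizes mean the
 * memory core never splits or merges accesses, so the handlers only ever
 * see sizes 4 and 8.
 */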
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

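/* one wired IRQ line per SMMUIrq value (EVTQ, PRIQ, CMD_SYNC and GERROR) */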
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

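/*
 * Migration: all guest-visible register state, including both queues
 * (base, producer/consumer indices, log2 size), is migrated so a guest
 * finds its SMMU exactly where it left it.
 */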
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

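/*
 * Placeholder: with an empty class_init the IOMMU memory region type is
 * registered but performs no translation yet; the translate callback is
 * expected to be installed by later patches.
 */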
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)