/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @s: SMMUv3 state handle
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
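
/*
 * Editor's illustration (not in the original source): GERROR/GERRORN
 * implement a toggle handshake. A bit is "pending" while GERROR and
 * GERRORN differ. E.g. with gerror = 0b0100 and gerrorn = 0b0000, bit 2
 * is pending; requesting gerror_mask = 0b0110 above only toggles bit 1
 * (new_gerrors = ~0b0100 & 0b0110 = 0b0010), leaving the already
 * pending bit 2 untouched until the guest acknowledges it via GERRORN.
 */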

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to inactive IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
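
/*
 * Editor's note, for illustration (not in the original source): SMMUv3
 * queue indices carry a wrap bit above the index bits, so with
 * log2size = n the queue is empty when prod == cons (wrap bits
 * included) and full when the index bits match but the wrap bits
 * differ. E.g. for a 4-entry queue (n = 2), prod = 0b100 and
 * cons = 0b000 both point at entry 0, but the differing wrap bits mean
 * the queue is full, not empty.
 */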

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /*
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transactions are always aborted / an error is returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}
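
/*
 * Editor's illustration (not in the original source): FIELD_DP32 is a
 * read-modify-write of one named register field. Assuming IDR0.TTF
 * occupies bits [3:2] as in the SMMUv3 spec, FIELD_DP32(0, IDR0, TTF, 2)
 * yields 0x8: only the TTF field changes, all other bits are preserved,
 * which is why each line above can safely build on the previous value.
 */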

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        /* record the address in the CD fetch event, not the STE one */
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * with the SID
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream tables
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
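
/*
 * Editor's illustration (not in the original source): with a 2-level
 * stream table and sid_split = 8, SID 0x1234 is decomposed above into
 * l1_ste_offset = 0x1234 >> 8 = 0x12 (index into the L1 descriptor
 * table) and l2_ste_offset = 0x1234 & 0xff = 0x34 (index into the L2
 * STE array pointed to by that descriptor), valid only while
 * l2_ste_offset <= (1 << span) - 1.
 */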

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}
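
/*
 * Editor's illustration (not in the original source): TSZ encodes the
 * input address size as 64 - tsz, so the accepted range [16, 39] above
 * corresponds to 25- to 48-bit input addresses. granule_sz is the log2
 * page size: 12 (4K), 14 (16K) or 16 (64K). E.g. tsz = 16 with a 4K
 * granule describes a 48-bit IOVA space walked in 4K pages.
 */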

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, perform a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                                      sdev->cfg_cache_hits,
                                      sdev->cfg_cache_misses,
                                      100 * sdev->cfg_cache_hits /
                                      (sdev->cfg_cache_hits +
                                       sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                                       sdev->cfg_cache_hits,
                                       sdev->cfg_cache_misses,
                                       100 * sdev->cfg_cache_hits /
                                       (sdev->cfg_cache_hits +
                                        sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (cfg->record_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
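
/*
 * Editor's illustration (not in the original source): on
 * SMMU_TRANS_SUCCESS the returned entry covers a whole page. With a 4K
 * granule (addr_mask = 0xfff), iova 0x8001234 mapping to page base
 * 0x40000000 yields translated_addr = 0x40000000 + (0x8001234 & 0xfff)
 * = 0x40000234; callers may then access any offset within that page.
 */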

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, dma_addr_t iova,
                               uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;

    if (!tg) {
        SMMUEventInfo event = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        tt = select_tt(cfg, iova);
        if (!tt) {
            return;
        }
        granule = tt->granule_sz;
    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}
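
/*
 * Editor's illustration (not in the original source): the TG field of
 * range-based invalidation commands encodes the granule as
 * granule = tg * 2 + 10, i.e. tg = 1 -> 12 (4K), tg = 2 -> 14 (16K),
 * tg = 3 -> 16 (64K); tg = 0 means "no granule supplied", in which
 * case the granule is recovered from the cached config above.
 */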

/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
                                      uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
                                        tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
        }
    }
}

static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    uint16_t vmid = CMD_VMID(cmd);
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into power-of-2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}
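
/*
 * Editor's illustration (not in the original source): for a 3-page 4K
 * invalidation starting at addr = 0x5000 (num_pages = 3, granule = 12),
 * end = 0x7fff and the loop above emits two power-of-2 aligned ranges:
 * [0x5000, 0x5fff] (1 page, limited by the alignment of 0x5000) and
 * [0x6000, 0x7fff] (2 pages).
 */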

static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
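        /*
         * Editor's illustration (not in the original source): for
         * CFGI_STE_RANGE with range = 2, mask = (1 << 3) - 1 = 7, so
         * sid = 0x35 invalidates the aligned SID window [0x30, 0x37]
         * in the config cache above.
         */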
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            smmuv3_s1_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW signals that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
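
/*
 * Editor's illustration (not in the original source): the register file
 * is exposed twice in the 128K MMIO window; clearing bit 16 folds page 1
 * onto page 0, e.g. a write to offset 0x10020 (page 1) is handled
 * exactly like a write to offset 0x20 (page 0).
 */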

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_GBPA:
        *data = s->gbpa;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset_hold(Object *obj)
{
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static bool smmuv3_gbpa_needed(void *opaque)
{
    SMMUv3State *s = opaque;

    /* Only migrate GBPA if it has a different reset value. */
    return s->gbpa != SMMU_GBPA_RESET_VAL;
}

static const VMStateDescription vmstate_gbpa = {
    .name = "smmuv3/gbpa",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smmuv3_gbpa_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gbpa, SMMUv3State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gbpa,
        NULL
    }
};
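
/*
 * Editor's note (not in the original source): vmstate_gbpa is attached
 * as a subsection, so the GBPA register only travels in the migration
 * stream when smmuv3_gbpa_needed() returns true, i.e. when GBPA differs
 * from its reset value; migration to older QEMU versions that lack the
 * subsection keeps working whenever GBPA is still at reset.
 */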

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)