/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @s: smmuv3 state
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to inactive IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

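/*
 * Circular queue helpers: queue_read() fetches the entry at the consumer
 * index and queue_write() stores at the producer index, advancing PROD on
 * success. CONS is advanced separately, by smmuv3_cmdq_consume() for the
 * command queue and by the guest through EVENTQ_CONS for the event queue.
 */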
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

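/*
 * Try to push an event record: returns MEMTX_ERROR when the event queue
 * is disabled or full, so the caller can report EVENTQ_ABT_ERR.
 */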
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

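/*
 * Build an event record from @info and push it to the event queue,
 * raising a GERROR (EVENTQ_ABT_ERR) interrupt if the write fails.
 */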
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_OK:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

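/*
 * Reset values: the ID registers advertise the emulated feature set
 * (stage 1 only, AArch64 tables, 2-level stream table) and the command
 * and event queues are returned to their default state.
 */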
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

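/* Fetch the stream table entry at guest physical address @addr */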
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 if the caller has no need to continue the translation */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret = -EINVAL;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true; /* abort but don't record any event */
        return ret;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return ret;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

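/*
 * Decode the context descriptor into @cfg. Only stage 1, AArch64 format
 * CDs are accepted; anything else flags a C_BAD_CD event.
 */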
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 if the translation needs to be aborted (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret = -EINVAL;
    STE ste;
    CD cd;

    if (smmu_find_ste(s, sid, &ste, event)) {
        return ret;
    }

    if (decode_ste(s, cfg, &ste, event)) {
        return ret;
    }

    if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

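/*
 * translate() callback for the IOMMU memory region: decode the per-device
 * configuration, walk the stage 1 page tables and, on failure, report the
 * fault through the event queue. When the SMMU is disabled or the stream
 * is bypassed, the input address is returned untranslated.
 */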
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTransCfg cfg = {};
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    int ret = 0;

    if (!smmu_enabled(s)) {
        goto out;
    }

    ret = smmuv3_decode_config(mr, &cfg, &event);
    if (ret) {
        goto out;
    }

    if (cfg.aborted) {
        goto out;
    }

    ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info);
    if (ret) {
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
out:
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%d)\n",
                      mr->parent_obj.name, addr, ret);
        entry.perm = IOMMU_NONE;
        smmuv3_record_event(s, &event);
    } else if (!cfg.aborted) {
        entry.perm = flag;
        trace_smmuv3_translate(mr->parent_obj.name, sid, addr,
                               entry.translated_addr, entry.perm);
    }

    return entry;
}

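/*
 * Dequeue and execute commands until the command queue is empty or an
 * error occurs. On error, CMDQ_ERR is set in GERROR and consumption
 * stops until the guest acknowledges it through GERRORN.
 */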
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

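/*
 * MMIO register write handlers. Architecturally 64-bit registers accept
 * either a single 64-bit access (smmu_writell) or a pair of 32-bit
 * accesses to their low and high halves (smmu_writel).
 */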
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

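/*
 * MMIO register read handlers, mirroring the write side: 64-bit registers
 * are readable either as one 64-bit or two 32-bit accesses.
 */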
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

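/*
 * Migration: all guest-programmable register state is saved and restored,
 * including both queues; the read-only ID registers are rebuilt at reset.
 */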
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

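/*
 * vfio/vhost register IOMMU notifiers to mirror mappings; since no
 * MAP/UNMAP notifications are generated yet, warn on first registration.
 */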
static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    if (old == IOMMU_NOTIFIER_NONE) {
        warn_report("SMMUV3 does not support vhost/vfio integration yet: "
                    "devices of those types will not function properly");
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)