/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"
#define PTW_RECORD_FAULT(cfg)   (((cfg)->stage == 1) ? (cfg)->record_faults : \
                                 (cfg)->s2cfg.record_faults)

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

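/*
 * Queue entries are stored little-endian in guest memory; convert each
 * 32-bit word on every read from the command queue and write to the
 * event queue.
 */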
static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);
    MemTxResult ret;
    int i;

    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }
    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
        le32_to_cpus(&cmd->word[i]);
    }
    return ret;
}

static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;
    Evt evt = *evt_in;
    int i;

    for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
        cpu_to_le32s(&evt.word[i]);
    }
    ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

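/*
 * Build an event record from @info and push it onto the event queue,
 * raising an EVENTQ_ABT_ERR global error if the write fails.
 */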
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

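/*
 * Reset the programmer-visible registers and advertise the emulated
 * feature set in the ID registers.
 */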
static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Advertise the supported translation stages based on the "stage" sys property. */
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
        /* XNX is a stage-2-specific feature */
        s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
    }
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret, i;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret, i;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/*
 * Max valid value is 39 when SMMU_IDR3.STT == 0.
 * In architectures after SMMUv3.0:
 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for
 *   this field is MAX(16, 64-IAS)
 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this
 *   field is (64-IAS).
 * As we only support AA64, IAS = OAS.
 */
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
    if (cfg->s2cfg.tsz > 39) {
        return false;
    }

    if (cfg->s2cfg.granule_sz == 16) {
        return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
    }

    return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
}

/*
 * Return true if the s2 page table config is valid.
 * This checks whether, with the configured start level, ias_bits and
 * granularity, we can have a valid page table as described in ARM ARM
 * D8.2 Translation process. The idea is to see, for the highest possible
 * number of IPA bits, how many concatenated tables we would need: if it
 * is more than 16, then this is not possible.
 */
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
    int level = get_start_level(sl0, gran);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;

    return nr_concat <= VMSA_MAX_S2_CONCAT;
}

static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
{
    cfg->stage = 2;

    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, the effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                 cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE is supported (IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* Stall is not supported. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw a bad STE
     * according to the user manual (IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(SMMU_IDR5_OAS);
        ret = decode_ste_s2_cfg(cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }
        for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
            le32_to_cpus(&l1std.word[i]);
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

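/*
 * Decode a Context Descriptor (stage-1 translation table parameters) into
 * @cfg; returns -EINVAL and sets a C_BAD_CD event on error.
 */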
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    /* ASID defaults to -1 (if s1 is not supported). */
    cfg->asid = -1;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

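/*
 * IOMMU translate callback: look up the cached config and IOTLB, walk the
 * guest page tables on a miss, and record an event on failure.
 */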
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    /*
     * Combined attributes used for TLB lookup; as only one stage is supported,
     * it will hold attributes based on the enabled stage.
     */
    SMMUTransTableInfo tt_combined;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
            status = SMMU_TRANS_ABORT;
        } else {
            status = SMMU_TRANS_DISABLE;
        }
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    if (cfg->stage == 1) {
        /* Select stage1 translation table. */
        tt = select_tt(cfg, addr);
        if (!tt) {
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            status = SMMU_TRANS_ERROR;
            goto epilogue;
        }
        tt_combined.granule_sz = tt->granule_sz;
        tt_combined.tsz = tt->tsz;

    } else {
        /* Stage2. */
        tt_combined.granule_sz = cfg->s2cfg.granule_sz;
        tt_combined.tsz = cfg->s2cfg.tsz;
    }
    /*
     * The TLB lookup needs the granule and input size for a translation
     * stage; as only one stage is supported right now, choose the right
     * values from the configuration.
     */
    page_mask = (1ULL << tt_combined.granule_sz) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            /*
             * We know that the TLB only contains either stage-1 or stage-2 as
             * nesting is not supported. So it is sufficient to check the
             * translation stage to know the TLB stage for now.
             */
            event.u.f_walk_eabt.s2 = (cfg->stage == 2);
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        /* All faults from the PTW carry the S2 field. */
        event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (PTW_RECORD_FAULT(cfg)) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                    (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @vmid: virtual machine ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, int vmid,
                               dma_addr_t iova, uint8_t tg,
                               uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;
    SMMUv3State *s = sdev->smmu;

    if (!tg) {
        SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
            return;
        }

        if (STAGE1_SUPPORTED(s)) {
            tt = select_tt(cfg, iova);
            if (!tt) {
                return;
            }
            granule = tt->granule_sz;
        } else {
            granule = cfg->s2cfg.granule_sz;
        }

    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

/* invalidate an asid/vmid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
                                      dma_addr_t iova, uint8_t tg,
                                      uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
                                        iova, tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
        }
    }
}

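/*
 * Handle a VA/IPA range invalidation command: notify unmaps and drop the
 * matching IOTLB entries, splitting range (RIL) invalidations into
 * power-of-two sized chunks when TG != 0.
 */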
static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    int vmid = -1;
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;
    SMMUv3State *smmuv3 = ARM_SMMUV3(s);

    /* Only consider VMID if stage-2 is supported. */
    if (STAGE2_SUPPORTED(smmuv3)) {
        vmid = CMD_VMID(cmd);
    }

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}

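/*
 * g_hash_table_foreach_remove() callback: return true (remove the cached
 * config) when the device's SID falls within the invalidated SID range.
 */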
static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

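/*
 * Drain the command queue: execute queued commands until the queue is
 * empty or a command error is raised (reported via GERROR.CMDQ_ERR).
 */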
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * Some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            QEMU_FALLTHROUGH;
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            if (!STAGE1_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_S12_VMALL:
        {
            uint16_t vmid = CMD_VMID(&cmd);

            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_vmid(bs, vmid);
            break;
        }
        case SMMU_CMD_TLBI_S2_IPA:
            if (!STAGE2_SUPPORTED(s)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }
            /*
             * As currently only either s1 or s2 are supported
             * we can reuse same function for s2.
             */
            smmuv3_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            if (cmd_error == SMMU_CERROR_ILL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Illegal command type: %d\n", CMD_TYPE(&cmd));
            }
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

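/* 64-bit MMIO register writes; 32-bit accesses are handled by smmu_writel(). */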
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_GBPA:
        /*
         * If UPDATE is not set, the write is ignored. This is the only
         * permitted behavior in SMMUv3.2 and later.
         */
        if (data & R_GBPA_UPDATE_MASK) {
            /* Ignore update bit as write is synchronous. */
            s->gbpa = data & ~R_GBPA_UPDATE_MASK;
        }
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

10a83cb9 PM |
1531 | static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data, |
1532 | unsigned size, MemTxAttrs attrs) | |
1533 | { | |
fae4be38 EA |
1534 | SMMUState *sys = opaque; |
1535 | SMMUv3State *s = ARM_SMMUV3(sys); | |
1536 | MemTxResult r; | |
1537 | ||
1538 | /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ | |
1539 | offset &= ~0x10000; | |
1540 | ||
1541 | switch (size) { | |
1542 | case 8: | |
1543 | r = smmu_writell(s, offset, data, attrs); | |
1544 | break; | |
1545 | case 4: | |
1546 | r = smmu_writel(s, offset, data, attrs); | |
1547 | break; | |
1548 | default: | |
1549 | r = MEMTX_ERROR; | |
1550 | break; | |
1551 | } | |
1552 | ||
1553 | trace_smmuv3_write_mmio(offset, data, size, r); | |
1554 | return r; | |
10a83cb9 PM |
1555 | } |
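/*
 * Note: smmu_write_mmio() only dispatches on access size; anything other
 * than a 4- or 8-byte access returns MEMTX_ERROR.  A 64-bit register such
 * as STRTAB_BASE is therefore reachable either as a single 8-byte access
 * (smmu_writell) or as two 4-byte halves, which smmu_writel assembles with
 * deposit64():
 *
 *   s->strtab_base = deposit64(s->strtab_base, 0, 32, lo);   // A_STRTAB_BASE
 *   s->strtab_base = deposit64(s->strtab_base, 32, 32, hi);  // A_STRTAB_BASE + 4
 *
 * The read side mirrors this with extract64() in smmu_readl() below.
 */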
1556 | ||
1557 | static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset, | |
1558 | uint64_t *data, MemTxAttrs attrs) | |
1559 | { | |
1560 | switch (offset) { | |
1561 | case A_GERROR_IRQ_CFG0: | |
1562 | *data = s->gerror_irq_cfg0; | |
1563 | return MEMTX_OK; | |
1564 | case A_STRTAB_BASE: | |
1565 | *data = s->strtab_base; | |
1566 | return MEMTX_OK; | |
1567 | case A_CMDQ_BASE: | |
1568 | *data = s->cmdq.base; | |
1569 | return MEMTX_OK; | |
1570 | case A_EVENTQ_BASE: | |
1571 | *data = s->eventq.base; | |
1572 | return MEMTX_OK; | |
1573 | default: | |
1574 | *data = 0; | |
1575 | qemu_log_mask(LOG_UNIMP, | |
1576 | "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n", | |
1577 | __func__, offset); | |
1578 | return MEMTX_OK; | |
1579 | } | |
1580 | } | |
1581 | ||
1582 | static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset, | |
1583 | uint64_t *data, MemTxAttrs attrs) | |
1584 | { | |
1585 | switch (offset) { | |
97fb318d | 1586 | case A_IDREGS ... A_IDREGS + 0x2f: |
10a83cb9 PM |
1587 | *data = smmuv3_idreg(offset - A_IDREGS); |
1588 | return MEMTX_OK; | |
1589 | case A_IDR0 ... A_IDR5: | |
1590 | *data = s->idr[(offset - A_IDR0) / 4]; | |
1591 | return MEMTX_OK; | |
1592 | case A_IIDR: | |
1593 | *data = s->iidr; | |
1594 | return MEMTX_OK; | |
5888f0ad EA |
1595 | case A_AIDR: |
1596 | *data = s->aidr; | |
1597 | return MEMTX_OK; | |
10a83cb9 PM |
1598 | case A_CR0: |
1599 | *data = s->cr[0]; | |
1600 | return MEMTX_OK; | |
1601 | case A_CR0ACK: | |
1602 | *data = s->cr0ack; | |
1603 | return MEMTX_OK; | |
1604 | case A_CR1: | |
1605 | *data = s->cr[1]; | |
1606 | return MEMTX_OK; | |
1607 | case A_CR2: | |
1608 | *data = s->cr[2]; | |
1609 | return MEMTX_OK; | |
1610 | case A_STATUSR: | |
1611 | *data = s->statusr; | |
1612 | return MEMTX_OK; | |
c2ecb424 MS |
1613 | case A_GBPA: |
1614 | *data = s->gbpa; | |
1615 | return MEMTX_OK; | |
10a83cb9 PM |
1616 | case A_IRQ_CTRL: |
1617 | case A_IRQ_CTRL_ACK: | |
1618 | *data = s->irq_ctrl; | |
1619 | return MEMTX_OK; | |
1620 | case A_GERROR: | |
1621 | *data = s->gerror; | |
1622 | return MEMTX_OK; | |
1623 | case A_GERRORN: | |
1624 | *data = s->gerrorn; | |
1625 | return MEMTX_OK; | |
1626 | case A_GERROR_IRQ_CFG0: /* 64b */ | |
1627 | *data = extract64(s->gerror_irq_cfg0, 0, 32); | |
1628 | return MEMTX_OK; | |
1629 | case A_GERROR_IRQ_CFG0 + 4: | |
1630 | *data = extract64(s->gerror_irq_cfg0, 32, 32); | |
1631 | return MEMTX_OK; | |
1632 | case A_GERROR_IRQ_CFG1: | |
1633 | *data = s->gerror_irq_cfg1; | |
1634 | return MEMTX_OK; | |
1635 | case A_GERROR_IRQ_CFG2: | |
1636 | *data = s->gerror_irq_cfg2; | |
1637 | return MEMTX_OK; | |
1638 | case A_STRTAB_BASE: /* 64b */ | |
1639 | *data = extract64(s->strtab_base, 0, 32); | |
1640 | return MEMTX_OK; | |
1641 | case A_STRTAB_BASE + 4: /* 64b */ | |
1642 | *data = extract64(s->strtab_base, 32, 32); | |
1643 | return MEMTX_OK; | |
1644 | case A_STRTAB_BASE_CFG: | |
1645 | *data = s->strtab_base_cfg; | |
1646 | return MEMTX_OK; | |
1647 | case A_CMDQ_BASE: /* 64b */ | |
1648 | *data = extract64(s->cmdq.base, 0, 32); | |
1649 | return MEMTX_OK; | |
1650 | case A_CMDQ_BASE + 4: | |
1651 | *data = extract64(s->cmdq.base, 32, 32); | |
1652 | return MEMTX_OK; | |
1653 | case A_CMDQ_PROD: | |
1654 | *data = s->cmdq.prod; | |
1655 | return MEMTX_OK; | |
1656 | case A_CMDQ_CONS: | |
1657 | *data = s->cmdq.cons; | |
1658 | return MEMTX_OK; | |
1659 | case A_EVENTQ_BASE: /* 64b */ | |
1660 | *data = extract64(s->eventq.base, 0, 32); | |
1661 | return MEMTX_OK; | |
1662 | case A_EVENTQ_BASE + 4: /* 64b */ | |
1663 | *data = extract64(s->eventq.base, 32, 32); | |
1664 | return MEMTX_OK; | |
1665 | case A_EVENTQ_PROD: | |
1666 | *data = s->eventq.prod; | |
1667 | return MEMTX_OK; | |
1668 | case A_EVENTQ_CONS: | |
1669 | *data = s->eventq.cons; | |
1670 | return MEMTX_OK; | |
1671 | default: | |
1672 | *data = 0; | |
1673 | qemu_log_mask(LOG_UNIMP, | |
1674 | "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n", | |
1675 | __func__, offset); | |
1676 | return MEMTX_OK; | |
1677 | } | |
1678 | } | |
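/*
 * Note: both read helpers above deliberately return MEMTX_OK for offsets
 * they do not model, with *data set to 0 (read-as-zero), and the write
 * helpers treat unknown offsets as write-ignored.  The guest therefore
 * never sees a bus fault for an unimplemented register; QEMU only emits a
 * LOG_UNIMP message.
 */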
1679 | ||
1680 | static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data, | |
1681 | unsigned size, MemTxAttrs attrs) | |
1682 | { | |
1683 | SMMUState *sys = opaque; | |
1684 | SMMUv3State *s = ARM_SMMUV3(sys); | |
1685 | MemTxResult r; | |
1686 | ||
1687 | /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ | |
1688 | offset &= ~0x10000; | |
1689 | ||
1690 | switch (size) { | |
1691 | case 8: | |
1692 | r = smmu_readll(s, offset, data, attrs); | |
1693 | break; | |
1694 | case 4: | |
1695 | r = smmu_readl(s, offset, data, attrs); | |
1696 | break; | |
1697 | default: | |
1698 | r = MEMTX_ERROR; | |
1699 | break; | |
1700 | } | |
1701 | ||
1702 | trace_smmuv3_read_mmio(offset, *data, size, r); | |
1703 | return r; | |
1704 | } | |
1705 | ||
1706 | static const MemoryRegionOps smmu_mem_ops = { | |
1707 | .read_with_attrs = smmu_read_mmio, | |
1708 | .write_with_attrs = smmu_write_mmio, | |
1709 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1710 | .valid = { | |
1711 | .min_access_size = 4, | |
1712 | .max_access_size = 8, | |
1713 | }, | |
1714 | .impl = { | |
1715 | .min_access_size = 4, | |
1716 | .max_access_size = 8, | |
1717 | }, | |
1718 | }; | |
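/*
 * Note: .valid restricts guest accesses to 4- or 8-byte widths (smaller or
 * larger accesses should be rejected by the memory core before the
 * callbacks run), and .impl matching it means the core never has to split
 * or merge accesses, so smmu_read_mmio()/smmu_write_mmio() only ever see
 * size == 4 or size == 8 for well-formed accesses; their MEMTX_ERROR
 * default is a defensive fallback.
 */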
1719 | ||
1720 | static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev) | |
1721 | { | |
1722 | int i; | |
1723 | ||
1724 | for (i = 0; i < ARRAY_SIZE(s->irq); i++) { | |
1725 | sysbus_init_irq(dev, &s->irq[i]); | |
1726 | } | |
1727 | } | |
1728 | ||
503819a3 | 1729 | static void smmu_reset_hold(Object *obj) |
10a83cb9 | 1730 | { |
503819a3 | 1731 | SMMUv3State *s = ARM_SMMUV3(obj); |
10a83cb9 PM |
1732 | SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); |
1733 | ||
503819a3 PM |
1734 | if (c->parent_phases.hold) { |
1735 | c->parent_phases.hold(obj); | |
1736 | } | |
10a83cb9 PM |
1737 | |
1738 | smmuv3_init_regs(s); | |
1739 | } | |
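/*
 * Note: this is the "hold" phase of QEMU's three-phase reset.  It chains
 * to the parent class's hold phase, if any, and then re-initializes the
 * register file via smmuv3_init_regs(), returning the SMMU to its
 * architected reset values on system reset.
 */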
1740 | ||
1741 | static void smmu_realize(DeviceState *d, Error **errp) | |
1742 | { | |
1743 | SMMUState *sys = ARM_SMMU(d); | |
1744 | SMMUv3State *s = ARM_SMMUV3(sys); | |
1745 | SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); | |
1746 | SysBusDevice *dev = SYS_BUS_DEVICE(d); | |
1747 | Error *local_err = NULL; | |
1748 | ||
1749 | c->parent_realize(d, &local_err); | |
1750 | if (local_err) { | |
1751 | error_propagate(errp, local_err); | |
1752 | return; | |
1753 | } | |
1754 | ||
32cfd7f3 EA |
1755 | qemu_mutex_init(&s->mutex); |
1756 | ||
10a83cb9 PM |
1757 | memory_region_init_io(&sys->iomem, OBJECT(s), |
1758 | &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000); | |
1759 | ||
1760 | sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION; | |
1761 | ||
1762 | sysbus_init_mmio(dev, &sys->iomem); | |
1763 | ||
1764 | smmu_init_irq(s, dev); | |
1765 | } | |
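/*
 * Note: the 0x20000-byte MMIO region covers two 64KB register pages;
 * smmu_read_mmio()/smmu_write_mmio() fold page 1 onto page 0 with
 * "offset &= ~0x10000".  The mutex initialized here serializes the
 * translation and command-processing paths defined earlier in this file.
 */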
1766 | ||
1767 | static const VMStateDescription vmstate_smmuv3_queue = { | |
1768 | .name = "smmuv3_queue", | |
1769 | .version_id = 1, | |
1770 | .minimum_version_id = 1, | |
607ef570 | 1771 | .fields = (const VMStateField[]) { |
10a83cb9 PM |
1772 | VMSTATE_UINT64(base, SMMUQueue), |
1773 | VMSTATE_UINT32(prod, SMMUQueue), | |
1774 | VMSTATE_UINT32(cons, SMMUQueue), | |
1775 | VMSTATE_UINT8(log2size, SMMUQueue), | |
758b71f7 | 1776 | VMSTATE_END_OF_LIST(), |
10a83cb9 PM |
1777 | }, |
1778 | }; | |
1779 | ||
c2ecb424 MS |
1780 | static bool smmuv3_gbpa_needed(void *opaque) |
1781 | { | |
1782 | SMMUv3State *s = opaque; | |
1783 | ||
1784 | /* Only migrate GBPA if it differs from its reset value. */ | |
1785 | return s->gbpa != SMMU_GBPA_RESET_VAL; | |
1786 | } | |
1787 | ||
1788 | static const VMStateDescription vmstate_gbpa = { | |
1789 | .name = "smmuv3/gbpa", | |
1790 | .version_id = 1, | |
1791 | .minimum_version_id = 1, | |
1792 | .needed = smmuv3_gbpa_needed, | |
607ef570 | 1793 | .fields = (const VMStateField[]) { |
c2ecb424 MS |
1794 | VMSTATE_UINT32(gbpa, SMMUv3State), |
1795 | VMSTATE_END_OF_LIST() | |
1796 | } | |
1797 | }; | |
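/*
 * Note: vmstate_gbpa is wired in below as a migration subsection: it is
 * only put on the wire when smmuv3_gbpa_needed() returns true, i.e. when
 * GBPA differs from its reset value.  That keeps the stream compatible
 * with destinations that predate the GBPA field while still preserving a
 * guest-modified GBPA across migration.
 */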
1798 | ||
10a83cb9 PM |
1799 | static const VMStateDescription vmstate_smmuv3 = { |
1800 | .name = "smmuv3", | |
1801 | .version_id = 1, | |
1802 | .minimum_version_id = 1, | |
a55aab61 | 1803 | .priority = MIG_PRI_IOMMU, |
607ef570 | 1804 | .fields = (const VMStateField[]) { |
10a83cb9 PM |
1805 | VMSTATE_UINT32(features, SMMUv3State), |
1806 | VMSTATE_UINT8(sid_size, SMMUv3State), | |
1807 | VMSTATE_UINT8(sid_split, SMMUv3State), | |
1808 | ||
1809 | VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3), | |
1810 | VMSTATE_UINT32(cr0ack, SMMUv3State), | |
1811 | VMSTATE_UINT32(statusr, SMMUv3State), | |
1812 | VMSTATE_UINT32(irq_ctrl, SMMUv3State), | |
1813 | VMSTATE_UINT32(gerror, SMMUv3State), | |
1814 | VMSTATE_UINT32(gerrorn, SMMUv3State), | |
1815 | VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State), | |
1816 | VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State), | |
1817 | VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State), | |
1818 | VMSTATE_UINT64(strtab_base, SMMUv3State), | |
1819 | VMSTATE_UINT32(strtab_base_cfg, SMMUv3State), | |
1820 | VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State), | |
1821 | VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State), | |
1822 | VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State), | |
1823 | ||
1824 | VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), | |
1825 | VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), | |
1826 | ||
1827 | VMSTATE_END_OF_LIST(), | |
1828 | }, | |
607ef570 | 1829 | .subsections = (const VMStateDescription * const []) { |
c2ecb424 MS |
1830 | &vmstate_gbpa, |
1831 | NULL | |
1832 | } | |
10a83cb9 PM |
1833 | }; |
1834 | ||
8cefcc3b MS |
1835 | static Property smmuv3_properties[] = { |
1836 | /* | |
1837 | * Stages of translation advertised to the guest: | |
1838 | * "1": Stage 1 | |
1839 | * "2": Stage 2 | |
1840 | * Defaults to stage 1. | |
1841 | */ | |
1842 | DEFINE_PROP_STRING("stage", SMMUv3State, stage), | |
1843 | DEFINE_PROP_END_OF_LIST() | |
1844 | }; | |
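/*
 * Sketch (assumption, not taken from this file): board code that creates
 * the SMMU can select the advertised stage through the QOM property
 * declared above, for example:
 *
 *   Object *smmu = object_new(TYPE_ARM_SMMUV3);
 *   object_property_set_str(smmu, "stage", "2", &error_fatal);
 *   // ... then realize and map the sysbus device as usual.
 *
 * How (or whether) a given machine exposes this on the command line is
 * outside the scope of this file.
 */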
1845 | ||
10a83cb9 PM |
1846 | static void smmuv3_instance_init(Object *obj) |
1847 | { | |
1848 | /* Nothing much to do here as of now */ | |
1849 | } | |
1850 | ||
1851 | static void smmuv3_class_init(ObjectClass *klass, void *data) | |
1852 | { | |
1853 | DeviceClass *dc = DEVICE_CLASS(klass); | |
503819a3 | 1854 | ResettableClass *rc = RESETTABLE_CLASS(klass); |
10a83cb9 PM |
1855 | SMMUv3Class *c = ARM_SMMUV3_CLASS(klass); |
1856 | ||
1857 | dc->vmsd = &vmstate_smmuv3; | |
503819a3 PM |
1858 | resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL, |
1859 | &c->parent_phases); | |
10a83cb9 PM |
1860 | c->parent_realize = dc->realize; |
1861 | dc->realize = smmu_realize; | |
8cefcc3b | 1862 | device_class_set_props(dc, smmuv3_properties); |
10a83cb9 PM |
1863 | } |
1864 | ||
549d4005 EA |
1865 | static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, |
1866 | IOMMUNotifierFlag old, | |
1867 | IOMMUNotifierFlag new, | |
1868 | Error **errp) | |
0d1ac82e | 1869 | { |
832e4222 EA |
1870 | SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu); |
1871 | SMMUv3State *s3 = sdev->smmu; | |
1872 | SMMUState *s = &(s3->smmu_state); | |
832e4222 | 1873 | |
958ec334 PX |
1874 | if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) { |
1875 | error_setg(errp, "SMMUv3 does not support dev-iotlb yet"); | |
1876 | return -EINVAL; | |
1877 | } | |
1878 | ||
832e4222 | 1879 | if (new & IOMMU_NOTIFIER_MAP) { |
549d4005 EA |
1880 | error_setg(errp, |
1881 | "device %02x.%02x.%x requires iommu MAP notifier which is " | |
1882 | "not currently supported", pci_bus_num(sdev->bus), | |
1883 | PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn)); | |
1884 | return -EINVAL; | |
832e4222 EA |
1885 | } |
1886 | ||
0d1ac82e | 1887 | if (old == IOMMU_NOTIFIER_NONE) { |
832e4222 | 1888 | trace_smmuv3_notify_flag_add(iommu->parent_obj.name); |
c6370441 EA |
1889 | QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next); |
1890 | } else if (new == IOMMU_NOTIFIER_NONE) { | |
1891 | trace_smmuv3_notify_flag_del(iommu->parent_obj.name); | |
1892 | QLIST_REMOVE(sdev, next); | |
0d1ac82e | 1893 | } |
549d4005 | 1894 | return 0; |
0d1ac82e EA |
1895 | } |
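/*
 * Note: only UNMAP-style notifiers are accepted here; clients that require
 * MAP notifications or device-IOTLB invalidations are refused with
 * -EINVAL.  Devices that do register a notifier are tracked on
 * devices_with_notifiers so that later invalidation handling can walk
 * them.
 */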
1896 | ||
10a83cb9 PM |
1897 | static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass, |
1898 | void *data) | |
1899 | { | |
9bde7f06 EA |
1900 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); |
1901 | ||
1902 | imrc->translate = smmuv3_translate; | |
0d1ac82e | 1903 | imrc->notify_flag_changed = smmuv3_notify_flag_changed; |
10a83cb9 PM |
1904 | } |
1905 | ||
1906 | static const TypeInfo smmuv3_type_info = { | |
1907 | .name = TYPE_ARM_SMMUV3, | |
1908 | .parent = TYPE_ARM_SMMU, | |
1909 | .instance_size = sizeof(SMMUv3State), | |
1910 | .instance_init = smmuv3_instance_init, | |
1911 | .class_size = sizeof(SMMUv3Class), | |
1912 | .class_init = smmuv3_class_init, | |
1913 | }; | |
1914 | ||
1915 | static const TypeInfo smmuv3_iommu_memory_region_info = { | |
1916 | .parent = TYPE_IOMMU_MEMORY_REGION, | |
1917 | .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION, | |
1918 | .class_init = smmuv3_iommu_memory_region_class_init, | |
1919 | }; | |
1920 | ||
1921 | static void smmuv3_register_types(void) | |
1922 | { | |
1923 | type_register(&smmuv3_type_info); | |
1924 | type_register(&smmuv3_iommu_memory_region_info); | |
1925 | } | |
1926 | ||
1927 | type_init(smmuv3_register_types) | |
1928 |