/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{

    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

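/*
 * Handle a guest write to GERRORN: errors are acknowledged by toggling the
 * corresponding bits so that GERRORN catches up with GERROR.
 */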
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

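/*
 * Queue accessors: read the entry at the consumer index, or write an entry
 * at the producer index and advance it, using guest-physical DMA.
 */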
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

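/* Push one event record into the event queue, if it is enabled and not full */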
void smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    if (smmuv3_q_full(q)) {
        return;
    }

    queue_write(q, evt);

    if (smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
}

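/* Set the ID registers, queue geometry and feature state to their reset values */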
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

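/*
 * Drain the command queue: fetch commands from guest memory one by one until
 * the queue is empty or an error (CERROR_ABT/CERROR_ILL) is encountered.
 */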
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

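/* 64-bit writes to the programming interface registers (offsets relative to page 0) */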
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

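/*
 * 32-bit writes, including the low/high halves of the 64-bit registers
 * (the "+ 4" cases below).
 */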
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

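/*
 * MMIO write dispatcher. A guest typically programs the SMMU through this
 * path: set up STRTAB_BASE/STRTAB_BASE_CFG, CMDQ_BASE and EVENTQ_BASE,
 * enable the relevant CR0 and IRQ_CTRL bits, then push commands and ring
 * CMDQ_PROD (the CR0 and CMDQ_PROD cases above trigger command consumption).
 */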
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

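/* 64-bit reads of the registers that are backed by 64-bit state */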
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

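/* 32-bit reads, including the low/high halves of the 64-bit registers */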
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

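/* MMIO read dispatcher: 32-bit and 64-bit accesses, page 1 aliases page 0 */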
static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

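/*
 * Register region ops: only 4- and 8-byte accesses are accepted, they are
 * little endian and carry memory transaction attributes.
 */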
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

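/* Expose one sysbus IRQ line per SMMUIrq source used by smmuv3_trigger_irq() */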
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

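/*
 * Realize: run the parent (SMMU base class) realize first, then register the
 * MMIO region and the IRQ lines with the sysbus device.
 */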
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

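/* Migration: the two queues are embedded as structs in the top-level SMMUv3 state */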
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

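/* QOM boilerplate: the SMMUv3 device type and its IOMMU memory region type */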
static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)