1 /*
2 * QEMU PowerPC XIVE interrupt controller model
3 *
4 * Copyright (c) 2017-2018, IBM Corporation.
5 *
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "sysemu/cpus.h"
15 #include "sysemu/dma.h"
16 #include "hw/qdev-properties.h"
17 #include "monitor/monitor.h"
18 #include "hw/ppc/xive.h"
19 #include "hw/ppc/xive_regs.h"
20
21 /*
22 * XIVE Thread Interrupt Management context
23 */
24
25 /*
26 * Convert a priority number to an Interrupt Pending Buffer (IPB)
27 * register, which indicates a pending interrupt at the priority
28 * corresponding to the bit number
29 */
30 static uint8_t priority_to_ipb(uint8_t priority)
31 {
32 return priority > XIVE_PRIORITY_MAX ?
33 0 : 1 << (XIVE_PRIORITY_MAX - priority);
34 }
35
36 /*
37 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
38 * Interrupt Priority Register (PIPR), which contains the priority of
39 * the most favored pending notification.
40 */
41 static uint8_t ipb_to_pipr(uint8_t ipb)
42 {
43 return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
44 }
45
46 static void ipb_update(uint8_t *regs, uint8_t priority)
47 {
48 regs[TM_IPB] |= priority_to_ipb(priority);
49 regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
50 }
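/*
 * Worked example (a sketch, assuming XIVE_PRIORITY_MAX is 7 as in
 * xive_regs.h): ipb_update(regs, 5) sets IPB bit 1 << (7 - 5) = 0x04,
 * and ipb_to_pipr(0x04) = clz32(0x04000000) = 5, so the PIPR then
 * reports priority 5 as the most favored pending interrupt.
 */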
51
52 static uint8_t exception_mask(uint8_t ring)
53 {
54 switch (ring) {
55 case TM_QW1_OS:
56 return TM_QW1_NSR_EO;
57 default:
58 g_assert_not_reached();
59 }
60 }
61
62 static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
63 {
64 uint8_t *regs = &tctx->regs[ring];
65 uint8_t nsr = regs[TM_NSR];
66 uint8_t mask = exception_mask(ring);
67
68 qemu_irq_lower(tctx->output);
69
70 if (regs[TM_NSR] & mask) {
71 uint8_t cppr = regs[TM_PIPR];
72
73 regs[TM_CPPR] = cppr;
74
75 /* Reset the pending buffer bit */
76 regs[TM_IPB] &= ~priority_to_ipb(cppr);
77 regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
78
79 /* Drop Exception bit */
80 regs[TM_NSR] &= ~mask;
81 }
82
83 return (nsr << 8) | regs[TM_CPPR];
84 }
85
86 static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
87 {
88 uint8_t *regs = &tctx->regs[ring];
89
90 if (regs[TM_PIPR] < regs[TM_CPPR]) {
91 regs[TM_NSR] |= exception_mask(ring);
92 qemu_irq_raise(tctx->output);
93 }
94 }
95
96 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
97 {
98 if (cppr > XIVE_PRIORITY_MAX) {
99 cppr = 0xff;
100 }
101
102 tctx->regs[ring + TM_CPPR] = cppr;
103
104 /* CPPR has changed, check if we need to raise a pending exception */
105 xive_tctx_notify(tctx, ring);
106 }
107
108 /*
109 * XIVE Thread Interrupt Management Area (TIMA)
110 */
111
112 /*
113 * Define an access map for each page of the TIMA that we will use in
114 * the memory region ops to filter values when doing loads and stores
115 * of raw registers values
116 *
117 * Register accessibility bits :
118 *
119 * 0x0 - no access
120 * 0x1 - write only
121 * 0x2 - read only
122 * 0x3 - read/write
123 */
124
125 static const uint8_t xive_tm_hw_view[] = {
126 /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
127 /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
128 /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
129 /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0,
130 };
131
132 static const uint8_t xive_tm_hv_view[] = {
133 /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
134 /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
135 /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
136 /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0,
137 };
138
139 static const uint8_t xive_tm_os_view[] = {
140 /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
141 /* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
142 /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
143 /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 };
145
146 static const uint8_t xive_tm_user_view[] = {
147 /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
148 /* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
149 /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
150 /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
151 };
152
153 /*
154 * Overall TIMA access map for the thread interrupt management context
155 * registers
156 */
157 static const uint8_t *xive_tm_views[] = {
158 [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
159 [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
160 [XIVE_TM_OS_PAGE] = xive_tm_os_view,
161 [XIVE_TM_USER_PAGE] = xive_tm_user_view,
162 };
163
164 /*
165 * Computes a register access mask for a given offset in the TIMA
166 */
167 static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
168 {
169 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
170 uint8_t reg_offset = offset & 0x3F;
171 uint8_t reg_mask = write ? 0x1 : 0x2;
172 uint64_t mask = 0x0;
173 int i;
174
175 for (i = 0; i < size; i++) {
176 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
177 mask |= (uint64_t) 0xff << (8 * (size - i - 1));
178 }
179 }
180
181 return mask;
182 }
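/*
 * Example of the mask computation above (a sketch): a 4-byte load at
 * the start of the OS ring (reg_offset 0x10) through the OS view
 * selects xive_tm_os_view[0x10..0x13] = { 2, 3, 2, 2 }, so the
 * returned mask is 0xffffffff (NSR, CPPR, IPB and LSMFB are all
 * readable), while the same access as a store only keeps the CPPR
 * byte and yields 0x00ff0000.
 */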
183
184 static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
185 unsigned size)
186 {
187 uint8_t ring_offset = offset & 0x30;
188 uint8_t reg_offset = offset & 0x3F;
189 uint64_t mask = xive_tm_mask(offset, size, true);
190 int i;
191
192 /*
193 * Only 4- and 8-byte stores are allowed and the User ring is
194 * excluded
195 */
196 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
197 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
198 HWADDR_PRIx"\n", offset);
199 return;
200 }
201
202 /*
203 * Use the register offset for the raw values and filter out
204 * reserved values
205 */
206 for (i = 0; i < size; i++) {
207 uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
208 if (byte_mask) {
209 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
210 byte_mask;
211 }
212 }
213 }
214
215 static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
216 {
217 uint8_t ring_offset = offset & 0x30;
218 uint8_t reg_offset = offset & 0x3F;
219 uint64_t mask = xive_tm_mask(offset, size, false);
220 uint64_t ret;
221 int i;
222
223 /*
224 * Only 4- and 8-byte loads are allowed and the User ring is
225 * excluded
226 */
227 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
228 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
229 HWADDR_PRIx"\n", offset);
230 return -1;
231 }
232
233 /* Use the register offset for the raw values */
234 ret = 0;
235 for (i = 0; i < size; i++) {
236 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
237 }
238
239 /* filter out reserved values */
240 return ret & mask;
241 }
242
243 /*
244 * The TM context is mapped twice within each page. Stores and loads
245 * to the first mapping below 2K write and read the specified values
246 * without modification. The second mapping above 2K performs specific
247 * state changes (side effects) in addition to setting/returning the
248 * interrupt management area context of the processor thread.
249 */
250 static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
251 {
252 return xive_tctx_accept(tctx, TM_QW1_OS);
253 }
254
255 static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
256 uint64_t value, unsigned size)
257 {
258 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
259 }
260
261 /*
262 * Adjust the IPB to allow a CPU to process event queues of other
263 * priorities during one physical interrupt cycle.
264 */
265 static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
266 uint64_t value, unsigned size)
267 {
268 ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
269 xive_tctx_notify(tctx, TM_QW1_OS);
270 }
271
272 /*
273 * Define a mapping of "special" operations depending on the TIMA page
274 * offset and the size of the operation.
275 */
276 typedef struct XiveTmOp {
277 uint8_t page_offset;
278 uint32_t op_offset;
279 unsigned size;
280 void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
281 unsigned size);
282 uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
283 } XiveTmOp;
284
285 static const XiveTmOp xive_tm_operations[] = {
286 /*
287 * MMIOs below 2K : raw values and special operations without side
288 * effects
289 */
290 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
291
292 /* MMIOs above 2K : special operations with side effects */
293 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
294 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
295 };
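/*
 * For instance (a sketch of the dispatch below), a 1-byte store to
 * TM_QW1_OS + TM_CPPR in the OS page is routed to xive_tm_set_os_cppr()
 * so that the CPPR update can re-evaluate the pending exception, and a
 * 2-byte load at TM_SPC_ACK_OS_REG (above 2K) goes through
 * xive_tm_ack_os_reg() to acknowledge the OS interrupt. Anything else
 * falls back to the raw register accessors.
 */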
296
297 static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
298 {
299 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
300 uint32_t op_offset = offset & 0xFFF;
301 int i;
302
303 for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
304 const XiveTmOp *xto = &xive_tm_operations[i];
305
306 /* Accesses done from a more privileged TIMA page are allowed */
307 if (xto->page_offset >= page_offset &&
308 xto->op_offset == op_offset &&
309 xto->size == size &&
310 ((write && xto->write_handler) || (!write && xto->read_handler))) {
311 return xto;
312 }
313 }
314 return NULL;
315 }
316
317 /*
318 * TIMA MMIO handlers
319 */
320 void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
321 unsigned size)
322 {
323 const XiveTmOp *xto;
324
325 /*
326 * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU
327 */
328
329 /*
330 * First, check for special operations in the 2K region
331 */
332 if (offset & 0x800) {
333 xto = xive_tm_find_op(offset, size, true);
334 if (!xto) {
335 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA"
336 "@%"HWADDR_PRIx"\n", offset);
337 } else {
338 xto->write_handler(tctx, offset, value, size);
339 }
340 return;
341 }
342
343 /*
344 * Then, for special operations in the region below 2K.
345 */
346 xto = xive_tm_find_op(offset, size, true);
347 if (xto) {
348 xto->write_handler(tctx, offset, value, size);
349 return;
350 }
351
352 /*
353 * Finish with raw access to the register values
354 */
355 xive_tm_raw_write(tctx, offset, value, size);
356 }
357
358 uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
359 {
360 const XiveTmOp *xto;
361
362 /*
363 * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU
364 */
365
366 /*
367 * First, check for special operations in the 2K region
368 */
369 if (offset & 0x800) {
370 xto = xive_tm_find_op(offset, size, false);
371 if (!xto) {
372 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
373 "@%"HWADDR_PRIx"\n", offset);
374 return -1;
375 }
376 return xto->read_handler(tctx, offset, size);
377 }
378
379 /*
380 * Then, for special operations in the region below 2K.
381 */
382 xto = xive_tm_find_op(offset, size, false);
383 if (xto) {
384 return xto->read_handler(tctx, offset, size);
385 }
386
387 /*
388 * Finish with raw access to the register values
389 */
390 return xive_tm_raw_read(tctx, offset, size);
391 }
392
393 static void xive_tm_write(void *opaque, hwaddr offset,
394 uint64_t value, unsigned size)
395 {
396 XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
397
398 xive_tctx_tm_write(tctx, offset, value, size);
399 }
400
401 static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
402 {
403 XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);
404
405 return xive_tctx_tm_read(tctx, offset, size);
406 }
407
408 const MemoryRegionOps xive_tm_ops = {
409 .read = xive_tm_read,
410 .write = xive_tm_write,
411 .endianness = DEVICE_BIG_ENDIAN,
412 .valid = {
413 .min_access_size = 1,
414 .max_access_size = 8,
415 },
416 .impl = {
417 .min_access_size = 1,
418 .max_access_size = 8,
419 },
420 };
421
422 static inline uint32_t xive_tctx_word2(uint8_t *ring)
423 {
424 return *((uint32_t *) &ring[TM_WORD2]);
425 }
426
427 static char *xive_tctx_ring_print(uint8_t *ring)
428 {
429 uint32_t w2 = xive_tctx_word2(ring);
430
431 return g_strdup_printf("%02x %02x %02x %02x %02x "
432 "%02x %02x %02x %08x",
433 ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
434 ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
435 be32_to_cpu(w2));
436 }
437
438 static const char * const xive_tctx_ring_names[] = {
439 "USER", "OS", "POOL", "PHYS",
440 };
441
442 void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
443 {
444 int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
445 int i;
446
447 monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
448 " W2\n", cpu_index);
449
450 for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
451 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
452 monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
453 xive_tctx_ring_names[i], s);
454 g_free(s);
455 }
456 }
457
458 static void xive_tctx_reset(void *dev)
459 {
460 XiveTCTX *tctx = XIVE_TCTX(dev);
461
462 memset(tctx->regs, 0, sizeof(tctx->regs));
463
464 /* Set some defaults */
465 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
466 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
467 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
468
469 /*
470 * Initialize PIPR to 0xFF to avoid phantom interrupts when the
471 * CPPR is first set.
472 */
473 tctx->regs[TM_QW1_OS + TM_PIPR] =
474 ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
475 }
476
477 static void xive_tctx_realize(DeviceState *dev, Error **errp)
478 {
479 XiveTCTX *tctx = XIVE_TCTX(dev);
480 PowerPCCPU *cpu;
481 CPUPPCState *env;
482 Object *obj;
483 Error *local_err = NULL;
484
485 obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
486 if (!obj) {
487 error_propagate(errp, local_err);
488 error_prepend(errp, "required link 'cpu' not found: ");
489 return;
490 }
491
492 cpu = POWERPC_CPU(obj);
493 tctx->cs = CPU(obj);
494
495 env = &cpu->env;
496 switch (PPC_INPUT(env)) {
497 case PPC_FLAGS_INPUT_POWER9:
498 tctx->output = env->irq_inputs[POWER9_INPUT_INT];
499 break;
500
501 default:
502 error_setg(errp, "XIVE interrupt controller does not support "
503 "this CPU bus model");
504 return;
505 }
506
507 qemu_register_reset(xive_tctx_reset, dev);
508 }
509
510 static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
511 {
512 qemu_unregister_reset(xive_tctx_reset, dev);
513 }
514
515 static const VMStateDescription vmstate_xive_tctx = {
516 .name = TYPE_XIVE_TCTX,
517 .version_id = 1,
518 .minimum_version_id = 1,
519 .fields = (VMStateField[]) {
520 VMSTATE_BUFFER(regs, XiveTCTX),
521 VMSTATE_END_OF_LIST()
522 },
523 };
524
525 static void xive_tctx_class_init(ObjectClass *klass, void *data)
526 {
527 DeviceClass *dc = DEVICE_CLASS(klass);
528
529 dc->desc = "XIVE Interrupt Thread Context";
530 dc->realize = xive_tctx_realize;
531 dc->unrealize = xive_tctx_unrealize;
532 dc->vmsd = &vmstate_xive_tctx;
533 }
534
535 static const TypeInfo xive_tctx_info = {
536 .name = TYPE_XIVE_TCTX,
537 .parent = TYPE_DEVICE,
538 .instance_size = sizeof(XiveTCTX),
539 .class_init = xive_tctx_class_init,
540 };
541
542 Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
543 {
544 Error *local_err = NULL;
545 Object *obj;
546
547 obj = object_new(TYPE_XIVE_TCTX);
548 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
549 object_unref(obj);
550 object_property_add_const_link(obj, "cpu", cpu, &error_abort);
551 object_property_set_bool(obj, true, "realized", &local_err);
552 if (local_err) {
553 goto error;
554 }
555
556 return obj;
557
558 error:
559 object_unparent(obj);
560 error_propagate(errp, local_err);
561 return NULL;
562 }
563
564 /*
565 * XIVE ESB helpers
566 */
567
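/*
 * Short recap of the PQ state bits handled below: Reset (P=0, Q=0)
 * forwards the next trigger, Off (P=0, Q=1) ignores triggers, Pending
 * (P=1, Q=0) means an event was already sent to the router, and
 * Queued (P=1, Q=1) coalesces further triggers until the EOI, which
 * replays one event by moving the state back to Pending.
 */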
568 static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
569 {
570 uint8_t old_pq = *pq & 0x3;
571
572 *pq &= ~0x3;
573 *pq |= value & 0x3;
574
575 return old_pq;
576 }
577
578 static bool xive_esb_trigger(uint8_t *pq)
579 {
580 uint8_t old_pq = *pq & 0x3;
581
582 switch (old_pq) {
583 case XIVE_ESB_RESET:
584 xive_esb_set(pq, XIVE_ESB_PENDING);
585 return true;
586 case XIVE_ESB_PENDING:
587 case XIVE_ESB_QUEUED:
588 xive_esb_set(pq, XIVE_ESB_QUEUED);
589 return false;
590 case XIVE_ESB_OFF:
591 xive_esb_set(pq, XIVE_ESB_OFF);
592 return false;
593 default:
594 g_assert_not_reached();
595 }
596 }
597
598 static bool xive_esb_eoi(uint8_t *pq)
599 {
600 uint8_t old_pq = *pq & 0x3;
601
602 switch (old_pq) {
603 case XIVE_ESB_RESET:
604 case XIVE_ESB_PENDING:
605 xive_esb_set(pq, XIVE_ESB_RESET);
606 return false;
607 case XIVE_ESB_QUEUED:
608 xive_esb_set(pq, XIVE_ESB_PENDING);
609 return true;
610 case XIVE_ESB_OFF:
611 xive_esb_set(pq, XIVE_ESB_OFF);
612 return false;
613 default:
614 g_assert_not_reached();
615 }
616 }
617
618 /*
619 * XIVE Interrupt Source (or IVSE)
620 */
621
622 uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
623 {
624 assert(srcno < xsrc->nr_irqs);
625
626 return xsrc->status[srcno] & 0x3;
627 }
628
629 uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
630 {
631 assert(srcno < xsrc->nr_irqs);
632
633 return xive_esb_set(&xsrc->status[srcno], pq);
634 }
635
636 /*
637 * Returns whether the event notification should be forwarded.
638 */
639 static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
640 {
641 uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
642
643 xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
644
645 switch (old_pq) {
646 case XIVE_ESB_RESET:
647 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
648 return true;
649 default:
650 return false;
651 }
652 }
653
654 /*
655 * Returns whether the event notification should be forwarded.
656 */
657 static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
658 {
659 bool ret;
660
661 assert(srcno < xsrc->nr_irqs);
662
663 ret = xive_esb_trigger(&xsrc->status[srcno]);
664
665 if (xive_source_irq_is_lsi(xsrc, srcno) &&
666 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
667 qemu_log_mask(LOG_GUEST_ERROR,
668 "XIVE: queued an event on LSI IRQ %d\n", srcno);
669 }
670
671 return ret;
672 }
673
674 /*
675 * Returns whether the event notification should be forwarded.
676 */
677 static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
678 {
679 bool ret;
680
681 assert(srcno < xsrc->nr_irqs);
682
683 ret = xive_esb_eoi(&xsrc->status[srcno]);
684
685 /*
686 * LSI sources do not set the Q bit but they can still be
687 * asserted, in which case we should forward a new event
688 * notification
689 */
690 if (xive_source_irq_is_lsi(xsrc, srcno) &&
691 xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
692 ret = xive_source_lsi_trigger(xsrc, srcno);
693 }
694
695 return ret;
696 }
697
698 /*
699 * Forward the source event notification to the Router
700 */
701 static void xive_source_notify(XiveSource *xsrc, int srcno)
702 {
703 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
704
705 if (xnc->notify) {
706 xnc->notify(xsrc->xive, srcno);
707 }
708 }
709
710 /*
711 * In a two-page ESB MMIO setting, the even page is the trigger page and
712 * the odd page is for management
713 */
714 static inline bool addr_is_even(hwaddr addr, uint32_t shift)
715 {
716 return !((addr >> shift) & 1);
717 }
718
719 static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
720 {
721 return xive_source_esb_has_2page(xsrc) &&
722 addr_is_even(addr, xsrc->esb_shift - 1);
723 }
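/*
 * Address decode example (a sketch): with a "2 pages" ESB shift, IRQ
 * srcno owns the MMIO range [srcno << esb_shift, (srcno + 1) << esb_shift)
 * and bit (esb_shift - 1) selects the page, 0 for trigger and 1 for
 * management/EOI.
 */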
724
725 /*
726 * ESB MMIO loads
727 *                      Trigger page    Management/EOI page
728 *
729 * ESB MMIO setting     2 pages         1 or 2 pages
730 *
731 * 0x000 .. 0x3FF       -1              EOI and return 0|1
732 * 0x400 .. 0x7FF       -1              EOI and return 0|1
733 * 0x800 .. 0xBFF       -1              return PQ
734 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
735 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
736 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
737 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
738 */
739 static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
740 {
741 XiveSource *xsrc = XIVE_SOURCE(opaque);
742 uint32_t offset = addr & 0xFFF;
743 uint32_t srcno = addr >> xsrc->esb_shift;
744 uint64_t ret = -1;
745
746 /* In a two-page ESB MMIO setting, the trigger page should not be read */
747 if (xive_source_is_trigger_page(xsrc, addr)) {
748 qemu_log_mask(LOG_GUEST_ERROR,
749 "XIVE: invalid load on IRQ %d trigger page at "
750 "0x%"HWADDR_PRIx"\n", srcno, addr);
751 return -1;
752 }
753
754 switch (offset) {
755 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
756 ret = xive_source_esb_eoi(xsrc, srcno);
757
758 /* Forward the source event notification for routing */
759 if (ret) {
760 xive_source_notify(xsrc, srcno);
761 }
762 break;
763
764 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
765 ret = xive_source_esb_get(xsrc, srcno);
766 break;
767
768 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
769 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
770 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
771 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
772 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
773 break;
774 default:
775 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
776 offset);
777 }
778
779 return ret;
780 }
781
782 /*
783 * ESB MMIO stores
784 *                      Trigger page    Management/EOI page
785 *
786 * ESB MMIO setting     2 pages         1 or 2 pages
787 *
788 * 0x000 .. 0x3FF       Trigger         Trigger
789 * 0x400 .. 0x7FF       Trigger         EOI
790 * 0x800 .. 0xBFF       Trigger         undefined
791 * 0xC00 .. 0xCFF       Trigger         PQ=00
792 * 0xD00 .. 0xDFF       Trigger         PQ=01
793 * 0xE00 .. 0xEFF       Trigger         PQ=10
794 * 0xF00 .. 0xFFF       Trigger         PQ=11
795 */
796 static void xive_source_esb_write(void *opaque, hwaddr addr,
797 uint64_t value, unsigned size)
798 {
799 XiveSource *xsrc = XIVE_SOURCE(opaque);
800 uint32_t offset = addr & 0xFFF;
801 uint32_t srcno = addr >> xsrc->esb_shift;
802 bool notify = false;
803
804 /* In a two-page ESB MMIO setting, the trigger page only triggers */
805 if (xive_source_is_trigger_page(xsrc, addr)) {
806 notify = xive_source_esb_trigger(xsrc, srcno);
807 goto out;
808 }
809
810 switch (offset) {
811 case 0 ... 0x3FF:
812 notify = xive_source_esb_trigger(xsrc, srcno);
813 break;
814
815 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
816 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
817 qemu_log_mask(LOG_GUEST_ERROR,
818 "XIVE: invalid Store EOI for IRQ %d\n", srcno);
819 return;
820 }
821
822 notify = xive_source_esb_eoi(xsrc, srcno);
823 break;
824
825 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
826 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
827 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
828 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
829 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
830 break;
831
832 default:
833 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
834 offset);
835 return;
836 }
837
838 out:
839 /* Forward the source event notification for routing */
840 if (notify) {
841 xive_source_notify(xsrc, srcno);
842 }
843 }
844
845 static const MemoryRegionOps xive_source_esb_ops = {
846 .read = xive_source_esb_read,
847 .write = xive_source_esb_write,
848 .endianness = DEVICE_BIG_ENDIAN,
849 .valid = {
850 .min_access_size = 8,
851 .max_access_size = 8,
852 },
853 .impl = {
854 .min_access_size = 8,
855 .max_access_size = 8,
856 },
857 };
858
859 void xive_source_set_irq(void *opaque, int srcno, int val)
860 {
861 XiveSource *xsrc = XIVE_SOURCE(opaque);
862 bool notify = false;
863
864 if (xive_source_irq_is_lsi(xsrc, srcno)) {
865 if (val) {
866 notify = xive_source_lsi_trigger(xsrc, srcno);
867 } else {
868 xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
869 }
870 } else {
871 if (val) {
872 notify = xive_source_esb_trigger(xsrc, srcno);
873 }
874 }
875
876 /* Forward the source event notification for routing */
877 if (notify) {
878 xive_source_notify(xsrc, srcno);
879 }
880 }
881
882 void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
883 {
884 int i;
885
886 for (i = 0; i < xsrc->nr_irqs; i++) {
887 uint8_t pq = xive_source_esb_get(xsrc, i);
888
889 if (pq == XIVE_ESB_OFF) {
890 continue;
891 }
892
893 monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
894 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
895 pq & XIVE_ESB_VAL_P ? 'P' : '-',
896 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
897 xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
898 }
899 }
900
901 static void xive_source_reset(void *dev)
902 {
903 XiveSource *xsrc = XIVE_SOURCE(dev);
904
905 /* Do not clear the LSI bitmap */
906
907 /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
908 memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
909 }
910
911 static void xive_source_realize(DeviceState *dev, Error **errp)
912 {
913 XiveSource *xsrc = XIVE_SOURCE(dev);
914 Object *obj;
915 Error *local_err = NULL;
916
917 obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
918 if (!obj) {
919 error_propagate(errp, local_err);
920 error_prepend(errp, "required link 'xive' not found: ");
921 return;
922 }
923
924 xsrc->xive = XIVE_NOTIFIER(obj);
925
926 if (!xsrc->nr_irqs) {
927 error_setg(errp, "Number of interrupts needs to be greater than 0");
928 return;
929 }
930
931 if (xsrc->esb_shift != XIVE_ESB_4K &&
932 xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
933 xsrc->esb_shift != XIVE_ESB_64K &&
934 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
935 error_setg(errp, "Invalid ESB shift setting");
936 return;
937 }
938
939 xsrc->status = g_malloc0(xsrc->nr_irqs);
940 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
941
942 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
943 &xive_source_esb_ops, xsrc, "xive.esb",
944 (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
945
946 qemu_register_reset(xive_source_reset, dev);
947 }
948
949 static const VMStateDescription vmstate_xive_source = {
950 .name = TYPE_XIVE_SOURCE,
951 .version_id = 1,
952 .minimum_version_id = 1,
953 .fields = (VMStateField[]) {
954 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
955 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
956 VMSTATE_END_OF_LIST()
957 },
958 };
959
960 /*
961 * The default XIVE interrupt source setting for the ESB MMIOs is two
962 * 64k pages without Store EOI, to be in sync with KVM.
963 */
964 static Property xive_source_properties[] = {
965 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
966 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
967 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
968 DEFINE_PROP_END_OF_LIST(),
969 };
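/*
 * Sizing note (a sketch): with the default shift above, each source
 * gets an even 64K trigger page plus an odd 64K management page, so
 * the "xive.esb" region created in xive_source_realize() spans
 * nr_irqs * 128K of guest MMIO space.
 */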
970
971 static void xive_source_class_init(ObjectClass *klass, void *data)
972 {
973 DeviceClass *dc = DEVICE_CLASS(klass);
974
975 dc->desc = "XIVE Interrupt Source";
976 dc->props = xive_source_properties;
977 dc->realize = xive_source_realize;
978 dc->vmsd = &vmstate_xive_source;
979 }
980
981 static const TypeInfo xive_source_info = {
982 .name = TYPE_XIVE_SOURCE,
983 .parent = TYPE_DEVICE,
984 .instance_size = sizeof(XiveSource),
985 .class_init = xive_source_class_init,
986 };
987
988 /*
989 * XiveEND helpers
990 */
991
992 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
993 {
994 uint64_t qaddr_base = (uint64_t) (be32_to_cpu(end->w2) & 0x0fffffff) << 32
995 | be32_to_cpu(end->w3);
996 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
997 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
998 uint32_t qentries = 1 << (qsize + 10);
999 int i;
1000
1001 /*
1002 * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
1003 */
1004 monitor_printf(mon, " [ ");
1005 qindex = (qindex - (width - 1)) & (qentries - 1);
1006 for (i = 0; i < width; i++) {
1007 uint64_t qaddr = qaddr_base + (qindex << 2);
1008 uint32_t qdata = -1;
1009
1010 if (dma_memory_read(&address_space_memory, qaddr, &qdata,
1011 sizeof(qdata))) {
1012 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
1013 HWADDR_PRIx "\n", qaddr);
1014 return;
1015 }
1016 monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
1017 be32_to_cpu(qdata));
1018 qindex = (qindex + 1) & (qentries - 1);
1019 }
1020 }
1021
1022 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
1023 {
1024 uint64_t qaddr_base = (uint64_t) (be32_to_cpu(end->w2) & 0x0fffffff) << 32
1025 | be32_to_cpu(end->w3);
1026 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1027 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1028 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1029 uint32_t qentries = 1 << (qsize + 10);
1030
1031 uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
1032 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
1033
1034 if (!xive_end_is_valid(end)) {
1035 return;
1036 }
1037
1038 monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
1039 "% 6d/%5d ^%d", end_idx,
1040 xive_end_is_valid(end) ? 'v' : '-',
1041 xive_end_is_enqueue(end) ? 'q' : '-',
1042 xive_end_is_notify(end) ? 'n' : '-',
1043 xive_end_is_backlog(end) ? 'b' : '-',
1044 xive_end_is_escalate(end) ? 'e' : '-',
1045 priority, nvt, qaddr_base, qindex, qentries, qgen);
1046
1047 xive_end_queue_pic_print_info(end, 6, mon);
1048 monitor_printf(mon, "]\n");
1049 }
1050
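/*
 * Enqueue one 4-byte entry in the END event queue (a descriptive note
 * on the helper below). The top bit of each entry carries the
 * generation bit, toggled every time the write index wraps, so the
 * consumer can tell freshly written entries from stale ones left over
 * from the previous pass.
 */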
1051 static void xive_end_enqueue(XiveEND *end, uint32_t data)
1052 {
1053 uint64_t qaddr_base = (uint64_t) (be32_to_cpu(end->w2) & 0x0fffffff) << 32
1054 | be32_to_cpu(end->w3);
1055 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1056 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1057 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1058
1059 uint64_t qaddr = qaddr_base + (qindex << 2);
1060 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
1061 uint32_t qentries = 1 << (qsize + 10);
1062
1063 if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
1064 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
1065 HWADDR_PRIx "\n", qaddr);
1066 return;
1067 }
1068
1069 qindex = (qindex + 1) & (qentries - 1);
1070 if (qindex == 0) {
1071 qgen ^= 1;
1072 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
1073 }
1074 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
1075 }
1076
1077 /*
1078 * XIVE Router (aka. Virtualization Controller or IVRE)
1079 */
1080
1081 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1082 XiveEAS *eas)
1083 {
1084 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1085
1086 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
1087 }
1088
1089 int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1090 XiveEND *end)
1091 {
1092 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1093
1094 return xrc->get_end(xrtr, end_blk, end_idx, end);
1095 }
1096
1097 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1098 XiveEND *end, uint8_t word_number)
1099 {
1100 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1101
1102 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
1103 }
1104
1105 int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1106 XiveNVT *nvt)
1107 {
1108 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1109
1110 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
1111 }
1112
1113 int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1114 XiveNVT *nvt, uint8_t word_number)
1115 {
1116 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1117
1118 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
1119 }
1120
1121 XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
1122 {
1123 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1124
1125 return xrc->get_tctx(xrtr, cs);
1126 }
1127
1128 /*
1129 * By default on P9, the HW CAM line (23 bits) is hardwired to:
1130 *
1131 *   0x000||0b1||4-bit chip number||7-bit thread number.
1132 *
1133 * When block grouping is enabled, the CAM line is changed to:
1134 *
1135 *   4-bit chip number||0x001||7-bit thread number.
1136 */
1137 static uint32_t hw_cam_line(uint8_t chip_id, uint8_t tid)
1138 {
1139 return 1 << 11 | (chip_id & 0xf) << 7 | (tid & 0x7f);
1140 }
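/*
 * For instance, chip 0 thread 5 yields a hardwired CAM line of
 * 1 << 11 | 0 << 7 | 5 = 0x805, which is what
 * xive_presenter_tctx_match_hw() below recomputes from the PIR.
 */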
1141
1142 static bool xive_presenter_tctx_match_hw(XiveTCTX *tctx,
1143 uint8_t nvt_blk, uint32_t nvt_idx)
1144 {
1145 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
1146 uint32_t pir = env->spr_cb[SPR_PIR].default_value;
1147
1148 return hw_cam_line((pir >> 8) & 0xf, pir & 0x7f) ==
1149 hw_cam_line(nvt_blk, nvt_idx);
1150 }
1151
1152 /*
1153 * The thread context register words are in big-endian format.
1154 */
1155 static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
1156 uint8_t nvt_blk, uint32_t nvt_idx,
1157 bool cam_ignore, uint32_t logic_serv)
1158 {
1159 uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
1160 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
1161 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
1162 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
1163 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
1164
1165 /*
1166 * TODO (PowerNV): ignore mode. The low order bits of the NVT
1167 * identifier are ignored in the "CAM" match.
1168 */
1169
1170 if (format == 0) {
1171 if (cam_ignore == true) {
1172 /*
1173 * F=0 & i=1: Logical server notification (bits ignored at
1174 * the end of the NVT identifier)
1175 */
1176 qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
1177 nvt_blk, nvt_idx);
1178 return -1;
1179 }
1180
1181 /* F=0 & i=0: Specific NVT notification */
1182
1183 /* PHYS ring */
1184 if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
1185 xive_presenter_tctx_match_hw(tctx, nvt_blk, nvt_idx)) {
1186 return TM_QW3_HV_PHYS;
1187 }
1188
1189 /* HV POOL ring */
1190 if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
1191 cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
1192 return TM_QW2_HV_POOL;
1193 }
1194
1195 /* OS ring */
1196 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1197 cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
1198 return TM_QW1_OS;
1199 }
1200 } else {
1201 /* F=1 : User level Event-Based Branch (EBB) notification */
1202
1203 /* USER ring */
1204 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1205 (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
1206 (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
1207 (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
1208 return TM_QW0_USER;
1209 }
1210 }
1211 return -1;
1212 }
1213
1214 typedef struct XiveTCTXMatch {
1215 XiveTCTX *tctx;
1216 uint8_t ring;
1217 } XiveTCTXMatch;
1218
1219 static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
1220 uint8_t nvt_blk, uint32_t nvt_idx,
1221 bool cam_ignore, uint8_t priority,
1222 uint32_t logic_serv, XiveTCTXMatch *match)
1223 {
1224 CPUState *cs;
1225
1226 /*
1227 * TODO (PowerNV): handle chip_id overwrite of block field for
1228 * hardwired CAM compares
1229 */
1230
1231 CPU_FOREACH(cs) {
1232 XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
1233 int ring;
1234
1235 /*
1236 * HW checks that the CPU is enabled in the Physical Thread
1237 * Enable Register (PTER).
1238 */
1239
1240 /*
1241 * Check the thread context CAM lines and record matches. We
1242 * will handle CPU exception delivery later
1243 */
1244 ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
1245 cam_ignore, logic_serv);
1246 /*
1247 * Save the context and follow on to catch duplicates, which we
1248 * don't support yet.
1249 */
1250 if (ring != -1) {
1251 if (match->tctx) {
1252 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
1253 "context NVT %x/%x\n", nvt_blk, nvt_idx);
1254 return false;
1255 }
1256
1257 match->ring = ring;
1258 match->tctx = tctx;
1259 }
1260 }
1261
1262 if (!match->tctx) {
1263 qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
1264 nvt_blk, nvt_idx);
1265 return false;
1266 }
1267
1268 return true;
1269 }
1270
1271 /*
1272 * This is our simple Xive Presenter Engine model. It is merged in the
1273 * Router as it does not require an extra object.
1274 *
1275 * It receives notification requests sent by the IVRE to find one
1276 * matching NVT (or more) dispatched on the processor threads. In case
1277 * of a single NVT notification, the process is abbreviated and the
1278 * thread is signaled if a match is found. In case of a logical server
1279 * notification (bits ignored at the end of the NVT identifier), the
1280 * IVPE and IVRE select a winning thread using different filters. This
1281 * involves 2 or 3 exchanges on the PowerBus that the model does not
1282 * support.
1283 *
1284 * The parameters represent what is sent on the PowerBus
1285 */
1286 static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
1287 uint8_t nvt_blk, uint32_t nvt_idx,
1288 bool cam_ignore, uint8_t priority,
1289 uint32_t logic_serv)
1290 {
1291 XiveNVT nvt;
1292 XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
1293 bool found;
1294
1295 /* NVT cache lookup */
1296 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
1297 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
1298 nvt_blk, nvt_idx);
1299 return;
1300 }
1301
1302 if (!xive_nvt_is_valid(&nvt)) {
1303 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
1304 nvt_blk, nvt_idx);
1305 return;
1306 }
1307
1308 found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
1309 priority, logic_serv, &match);
1310 if (found) {
1311 ipb_update(&match.tctx->regs[match.ring], priority);
1312 xive_tctx_notify(match.tctx, match.ring);
1313 return;
1314 }
1315
1316 /* Record the IPB in the associated NVT structure */
1317 ipb_update((uint8_t *) &nvt.w4, priority);
1318 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
1319
1320 /*
1321 * If no matching NVT is dispatched on a HW thread :
1322 * - update the NVT structure if backlog is activated
1323 * - escalate (ESe PQ bits and EAS in w4-5) if escalation is
1324 * activated
1325 */
1326 }
1327
1328 /*
1329 * An END trigger can come from an event trigger (IPI or HW) or from
1330 * another chip. We don't model the PowerBus but the END trigger
1331 * message has the same parameters as the function below.
1332 */
1333 static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
1334 uint32_t end_idx, uint32_t end_data)
1335 {
1336 XiveEND end;
1337 uint8_t priority;
1338 uint8_t format;
1339
1340 /* END cache lookup */
1341 if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
1342 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1343 end_idx);
1344 return;
1345 }
1346
1347 if (!xive_end_is_valid(&end)) {
1348 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1349 end_blk, end_idx);
1350 return;
1351 }
1352
1353 if (xive_end_is_enqueue(&end)) {
1354 xive_end_enqueue(&end, end_data);
1355 /* Enqueuing event data modifies the EQ toggle and index */
1356 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1357 }
1358
1359 /*
1360 * The W7 format depends on the F bit in W6. It defines the type
1361 * of the notification :
1362 *
1363 * F=0 : single or multiple NVT notification
1364 * F=1 : User level Event-Based Branch (EBB) notification, no
1365 * priority
1366 */
1367 format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
1368 priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);
1369
1370 /* The END is masked */
1371 if (format == 0 && priority == 0xff) {
1372 return;
1373 }
1374
1375 /*
1376 * Check the END ESn (Event State Buffer for notification) for
1377 * even further coalescing in the Router
1378 */
1379 if (!xive_end_is_notify(&end)) {
1380 uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
1381 bool notify = xive_esb_trigger(&pq);
1382
1383 if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
1384 end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
1385 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1386 }
1387
1388 /* ESn[Q]=1 : end of notification */
1389 if (!notify) {
1390 return;
1391 }
1392 }
1393
1394 /*
1395 * Follows IVPE notification
1396 */
1397 xive_presenter_notify(xrtr, format,
1398 xive_get_field32(END_W6_NVT_BLOCK, end.w6),
1399 xive_get_field32(END_W6_NVT_INDEX, end.w6),
1400 xive_get_field32(END_W7_F0_IGNORE, end.w7),
1401 priority,
1402 xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
1403
1404 /* TODO: Auto EOI. */
1405 }
1406
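/*
 * Entry point used by the sources (a short recap of the routing path
 * above): the LISN is looked up in the EAS table, a valid and unmasked
 * EAS turns the event trigger into an END trigger, the END may enqueue
 * the event data and coalesce through its ESn bits, and the presenter
 * finally searches for a dispatched NVT to raise the thread interrupt
 * line.
 */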
1407 void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
1408 {
1409 XiveRouter *xrtr = XIVE_ROUTER(xn);
1410 uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
1411 uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
1412 XiveEAS eas;
1413
1414 /* EAS cache lookup */
1415 if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
1416 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
1417 return;
1418 }
1419
1420 /*
1421 * The IVRE checks the State Bit Cache at this point. We skip the
1422 * SBC lookup because the state bits of the sources are modeled
1423 * internally in QEMU.
1424 */
1425
1426 if (!xive_eas_is_valid(&eas)) {
1427 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
1428 return;
1429 }
1430
1431 if (xive_eas_is_masked(&eas)) {
1432 /* Notification completed */
1433 return;
1434 }
1435
1436 /*
1437 * The event trigger becomes an END trigger
1438 */
1439 xive_router_end_notify(xrtr,
1440 xive_get_field64(EAS_END_BLOCK, eas.w),
1441 xive_get_field64(EAS_END_INDEX, eas.w),
1442 xive_get_field64(EAS_END_DATA, eas.w));
1443 }
1444
1445 static void xive_router_class_init(ObjectClass *klass, void *data)
1446 {
1447 DeviceClass *dc = DEVICE_CLASS(klass);
1448 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1449
1450 dc->desc = "XIVE Router Engine";
1451 xnc->notify = xive_router_notify;
1452 }
1453
1454 static const TypeInfo xive_router_info = {
1455 .name = TYPE_XIVE_ROUTER,
1456 .parent = TYPE_SYS_BUS_DEVICE,
1457 .abstract = true,
1458 .class_size = sizeof(XiveRouterClass),
1459 .class_init = xive_router_class_init,
1460 .interfaces = (InterfaceInfo[]) {
1461 { TYPE_XIVE_NOTIFIER },
1462 { }
1463 }
1464 };
1465
1466 void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
1467 {
1468 if (!xive_eas_is_valid(eas)) {
1469 return;
1470 }
1471
1472 monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
1473 lisn, xive_eas_is_masked(eas) ? "M" : " ",
1474 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
1475 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
1476 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
1477 }
1478
1479 /*
1480 * END ESB MMIO loads
1481 */
1482 static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
1483 {
1484 XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
1485 uint32_t offset = addr & 0xFFF;
1486 uint8_t end_blk;
1487 uint32_t end_idx;
1488 XiveEND end;
1489 uint32_t end_esmask;
1490 uint8_t pq;
1491 uint64_t ret = -1;
1492
1493 end_blk = xsrc->block_id;
1494 end_idx = addr >> (xsrc->esb_shift + 1);
1495
1496 if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
1497 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1498 end_idx);
1499 return -1;
1500 }
1501
1502 if (!xive_end_is_valid(&end)) {
1503 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1504 end_blk, end_idx);
1505 return -1;
1506 }
1507
1508 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
1509 pq = xive_get_field32(end_esmask, end.w1);
1510
1511 switch (offset) {
1512 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
1513 ret = xive_esb_eoi(&pq);
1514
1515 /* Forward the source event notification for routing ?? */
1516 break;
1517
1518 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1519 ret = pq;
1520 break;
1521
1522 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1523 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1524 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1525 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1526 ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
1527 break;
1528 default:
1529 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %x\n",
1530 offset);
1531 return -1;
1532 }
1533
1534 if (pq != xive_get_field32(end_esmask, end.w1)) {
1535 end.w1 = xive_set_field32(end_esmask, end.w1, pq);
1536 xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
1537 }
1538
1539 return ret;
1540 }
1541
1542 /*
1543 * END ESB MMIO stores are invalid
1544 */
1545 static void xive_end_source_write(void *opaque, hwaddr addr,
1546 uint64_t value, unsigned size)
1547 {
1548 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
1549 HWADDR_PRIx"\n", addr);
1550 }
1551
1552 static const MemoryRegionOps xive_end_source_ops = {
1553 .read = xive_end_source_read,
1554 .write = xive_end_source_write,
1555 .endianness = DEVICE_BIG_ENDIAN,
1556 .valid = {
1557 .min_access_size = 8,
1558 .max_access_size = 8,
1559 },
1560 .impl = {
1561 .min_access_size = 8,
1562 .max_access_size = 8,
1563 },
1564 };
1565
1566 static void xive_end_source_realize(DeviceState *dev, Error **errp)
1567 {
1568 XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
1569 Object *obj;
1570 Error *local_err = NULL;
1571
1572 obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
1573 if (!obj) {
1574 error_propagate(errp, local_err);
1575 error_prepend(errp, "required link 'xive' not found: ");
1576 return;
1577 }
1578
1579 xsrc->xrtr = XIVE_ROUTER(obj);
1580
1581 if (!xsrc->nr_ends) {
1582 error_setg(errp, "Number of ENDs needs to be greater than 0");
1583 return;
1584 }
1585
1586 if (xsrc->esb_shift != XIVE_ESB_4K &&
1587 xsrc->esb_shift != XIVE_ESB_64K) {
1588 error_setg(errp, "Invalid ESB shift setting");
1589 return;
1590 }
1591
1592 /*
1593 * Each END is assigned an even/odd pair of MMIO pages, the even page
1594 * manages the ESn field while the odd page manages the ESe field.
1595 */
1596 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
1597 &xive_end_source_ops, xsrc, "xive.end",
1598 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
1599 }
1600
1601 static Property xive_end_source_properties[] = {
1602 DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
1603 DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
1604 DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
1605 DEFINE_PROP_END_OF_LIST(),
1606 };
1607
1608 static void xive_end_source_class_init(ObjectClass *klass, void *data)
1609 {
1610 DeviceClass *dc = DEVICE_CLASS(klass);
1611
1612 dc->desc = "XIVE END Source";
1613 dc->props = xive_end_source_properties;
1614 dc->realize = xive_end_source_realize;
1615 }
1616
1617 static const TypeInfo xive_end_source_info = {
1618 .name = TYPE_XIVE_END_SOURCE,
1619 .parent = TYPE_DEVICE,
1620 .instance_size = sizeof(XiveENDSource),
1621 .class_init = xive_end_source_class_init,
1622 };
1623
1624 /*
1625 * XIVE Notifier
1626 */
1627 static const TypeInfo xive_notifier_info = {
1628 .name = TYPE_XIVE_NOTIFIER,
1629 .parent = TYPE_INTERFACE,
1630 .class_size = sizeof(XiveNotifierClass),
1631 };
1632
1633 static void xive_register_types(void)
1634 {
1635 type_register_static(&xive_source_info);
1636 type_register_static(&xive_notifier_info);
1637 type_register_static(&xive_router_info);
1638 type_register_static(&xive_end_source_info);
1639 type_register_static(&xive_tctx_info);
1640 }
1641
1642 type_init(xive_register_types)