hw/pci/msix.c
/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu/range.h"

#define MSIX_CAP_LENGTH 12

/* The MSI-X enable bit and the maskall bit live in byte 1 of the FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
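
/*
 * Layout sketch (illustration only, not part of the build): each vector owns
 * one 16-byte entry in the in-memory MSI-X table that msix_get_message() and
 * msix_set_message() above operate on. The offsets are the standard
 * PCI_MSIX_ENTRY_* values; the snippet below just spells out how a full
 * entry, including the per-vector mask bit, would be decoded.
 *
 *     uint8_t *entry  = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
 *     uint64_t addr   = pci_get_quad(entry + PCI_MSIX_ENTRY_LOWER_ADDR);
 *     uint32_t data   = pci_get_long(entry + PCI_MSIX_ENTRY_DATA);
 *     uint32_t ctrl   = pci_get_long(entry + PCI_MSIX_ENTRY_VECTOR_CTRL);
 *     bool     masked = ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT;
 */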

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}
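
/*
 * Worked example (illustration only): the PBA keeps one pending bit per
 * vector, packed eight to a byte. For vector 10, msix_pending_byte()
 * returns &dev->msix_pba[1] and msix_pending_mask() returns 1 << 2, so
 * msix_set_pending(dev, 10) ORs bit 2 into the second PBA byte and
 * msix_clr_pending(dev, 10) clears it again.
 */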

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    return fmask || dev->msix_table[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) ||
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}
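
/*
 * Usage sketch (illustration only, not part of this file): a device that
 * installs its own config-space write handler forwards writes so that
 * MSI-X enable/maskall changes are noticed. "my_device_write_config" is a
 * hypothetical name; only pci_default_write_config() and
 * msix_write_config() are real APIs.
 *
 *     static void my_device_write_config(PCIDevice *pci_dev, uint32_t addr,
 *                                        uint32_t val, int len)
 *     {
 *         pci_default_write_config(pci_dev, addr, val, len);
 *         msix_write_config(pci_dev, addr, val, len);
 *     }
 */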

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        /* Each PBA byte covers 8 vectors, so a read of 'size' bytes
         * spans size * 8 vectors starting at addr * 8. */
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by the interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make the enable and maskall flag bits writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
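
/*
 * Usage sketch (illustration only): a device that keeps the MSI-X table and
 * PBA inside one of its own BARs might call msix_init() like this from its
 * init routine. "pdev" and "bar" (a MemoryRegion the device already exposes
 * as BAR 1, here assumed to be at least 4K) and the offsets are hypothetical;
 * only the msix_init() signature comes from this file.
 *
 *     int ret = msix_init(pdev, 8,
 *                         bar, 1, 0x0000,   // vector table: BAR 1, offset 0
 *                         bar, 1, 0x0800,   // PBA: BAR 1, offset 0x800
 *                         0);               // cap_pos 0: let the core pick
 *     if (ret < 0) {
 *         return ret;
 *     }
 */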

int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr)
{
    int ret;
    char *name;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half. Do not use these elsewhere!
     */
#define MSIX_EXCLUSIVE_BAR_SIZE 4096
#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
#define MSIX_EXCLUSIVE_CAP_OFFSET 0

    if (nentries * PCI_MSIX_ENTRY_SIZE > MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
        return -EINVAL;
    }

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, name, MSIX_EXCLUSIVE_BAR_SIZE);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    MSIX_EXCLUSIVE_BAR_TABLE_OFFSET, &dev->msix_exclusive_bar,
                    bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
                    MSIX_EXCLUSIVE_CAP_OFFSET);
    if (ret) {
        memory_region_destroy(&dev->msix_exclusive_bar);
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
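
/*
 * Usage sketch (illustration only): a device that does not want to carve
 * MSI-X space out of an existing BAR can let this helper create a dedicated
 * 4K BAR instead. "N_VECTORS" and the chosen BAR number are hypothetical.
 *
 *     if (msix_init_exclusive_bar(pdev, N_VECTORS, 2) < 0) {
 *         ... fall back to INTx or propagate the error ...
 *     }
 *     ...
 *     msix_uninit_exclusive_bar(pdev);   // on device teardown
 */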

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    memory_region_destroy(&dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    memory_region_destroy(&dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
        memory_region_destroy(&dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, (n + 7) / 8);
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, (n + 7) / 8);
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}
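
/*
 * Usage sketch (illustration only): devices with hand-rolled save/load
 * handlers serialize MSI-X state with msix_save()/msix_load(), after the
 * PCI config space itself so the capability is already restored. The
 * handler and state names are hypothetical; a VMState-based device would
 * instead reference vmstate_msix (see the end of this file).
 *
 *     static void my_device_save(QEMUFile *f, void *opaque)
 *     {
 *         MyDevState *s = opaque;
 *         pci_device_save(&s->pci_dev, f);
 *         msix_save(&s->pci_dev, f);
 *     }
 *
 *     static int my_device_load(QEMUFile *f, void *opaque, int version_id)
 *     {
 *         MyDevState *s = opaque;
 *         int ret = pci_device_load(&s->pci_dev, f);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         msix_load(&s->pci_dev, f);
 *         return 0;
 *     }
 */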

/* Does the device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    stl_le_phys(msg.address, msg.data);
}
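
/*
 * Usage sketch (illustration only): a device raises vector n simply by
 * calling msix_notify(); if the vector or the whole function is masked the
 * interrupt is latched in the PBA and delivered later by
 * msix_handle_mask_update(). The INTx fallback below is a common device
 * pattern, not something this module mandates; "pdev" is hypothetical.
 *
 *     if (msix_enabled(pdev)) {
 *         msix_notify(pdev, vector);
 *     } else {
 *         ... raise the device's legacy INTx interrupt instead ...
 *     }
 */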

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }
    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}
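
/*
 * Usage sketch (illustration only): a device typically declares the vectors
 * it will actually fire right after msix_init(), and releases them again on
 * reset or teardown. The loop, "nvectors" and "pdev" are hypothetical.
 *
 *     for (i = 0; i < nvectors; i++) {
 *         msix_vector_use(pdev, i);
 *     }
 *     ...
 *     msix_unuse_all_vectors(pdev);   // e.g. from the device's reset handler
 */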

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}
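
/*
 * Usage sketch (illustration only): a device that routes MSI-X vectors
 * somewhere else (for example to an in-kernel irqchip) registers per-vector
 * callbacks here. The callback names and bodies are hypothetical; the
 * signatures follow how this module invokes the notifiers above.
 *
 *     static int my_vector_use(PCIDevice *pdev, unsigned vector, MSIMessage msg)
 *     {
 *         ... set up routing for (vector, msg) ...
 *         return 0;
 *     }
 *
 *     static void my_vector_release(PCIDevice *pdev, unsigned vector)
 *     {
 *         ... tear the routing down again ...
 *     }
 *
 *     msix_set_vector_notifiers(pdev, my_vector_use, my_vector_release, NULL);
 *     ...
 *     msix_unset_vector_notifiers(pdev);   // on device teardown
 */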

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static void put_msix_state(QEMUFile *f, void *pv, size_t size)
{
    msix_save(pv, f);
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get = get_msix_state,
    .put = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name = "msix",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_msix,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};