/* drivers/xen/pciback/pciback_ops.c — recovered from a git annotate/blame
 * export (commit 30edc14b); per-line table framing stripped. */
1 | /* |
2 | * PCI Backend Operations - respond to PCI requests from Frontend | |
3 | * | |
4 | * Author: Ryan Wilson <hap9@epoch.ncsc.mil> | |
5 | */ | |
6 | #include <linux/module.h> | |
7 | #include <linux/wait.h> | |
8 | #include <linux/bitops.h> | |
9 | #include <xen/events.h> | |
10 | #include <linux/sched.h> | |
11 | #include "pciback.h" | |
12 | ||
/* Verbosity knob for the backend; writable at runtime via sysfs (0644).
 * NOTE(review): presumably gates per-request logging elsewhere in pciback —
 * not used in this chunk; confirm against the rest of the driver. */
int verbose_request;
module_param(verbose_request, int, 0644);
15 | ||
16 | /* Ensure a device is "turned off" and ready to be exported. | |
17 | * (Also see pciback_config_reset to ensure virtual configuration space is | |
18 | * ready to be re-exported) | |
19 | */ | |
20 | void pciback_reset_device(struct pci_dev *dev) | |
21 | { | |
22 | u16 cmd; | |
23 | ||
24 | /* Disable devices (but not bridges) */ | |
25 | if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) { | |
26 | pci_disable_device(dev); | |
27 | ||
28 | pci_write_config_word(dev, PCI_COMMAND, 0); | |
29 | ||
30 | dev->is_busmaster = 0; | |
31 | } else { | |
32 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | |
33 | if (cmd & (PCI_COMMAND_INVALIDATE)) { | |
34 | cmd &= ~(PCI_COMMAND_INVALIDATE); | |
35 | pci_write_config_word(dev, PCI_COMMAND, cmd); | |
36 | ||
37 | dev->is_busmaster = 0; | |
38 | } | |
39 | } | |
40 | } | |
/*
 * The same event channel is now used both for pcifront conf_read_write
 * requests and for PCIe AER frontend acks.  We use a dedicated workqueue
 * to schedule the pciback conf_read_write service, to avoid conflicting
 * with the AER core's do_recovery job, which also uses the system default
 * workqueue.
 */
/* Kick the work queue if the frontend has a pending request, and wake any
 * waiter once an AER transaction has been acked by the frontend.
 *
 * @pdev: backend device whose shared-info flags are inspected.
 *
 * Called both from the event-channel IRQ handler and from pciback_do_op
 * after an operation completes, so it must be safe against concurrent
 * invocation — the atomic test_and_set_bit below ensures only one caller
 * actually queues the work.
 */
void test_and_schedule_op(struct pciback_device *pdev)
{
	/* Check that frontend is requesting an operation and that we are not
	 * already processing a request: test_and_set_bit atomically claims
	 * _PDEVF_op_active, so at most one scheduler queues op_work. */
	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
		queue_work(pciback_wq, &pdev->op_work);
	}

	/* _XEN_PCIB_active should have been cleared by pcifront; also make
	 * sure pciback is actually waiting for an AER ack by checking
	 * _PCIB_op_pending before waking the waiter. */
	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
		wake_up(&aer_wait_queue);
	}
}
62 | ||
63 | /* Performing the configuration space reads/writes must not be done in atomic | |
64 | * context because some of the pci_* functions can sleep (mostly due to ACPI | |
65 | * use of semaphores). This function is intended to be called from a work | |
66 | * queue in process context taking a struct pciback_device as a parameter */ | |
67 | ||
/* Work-queue handler: execute one request from the frontend's shared ring.
 *
 * @data: embedded op_work member; container_of recovers the pciback_device.
 *
 * Runs in process context (config-space accessors may sleep).  Looks up
 * the target PCI device from the (domain, bus, devfn) triple in the shared
 * op, dispatches on op->cmd, and writes the result into op->err for the
 * frontend to read.  The clear_bit/notify/clear_bit sequence at the end is
 * order-critical — see the inline barrier comments.
 */
void pciback_do_op(struct work_struct *data)
{
	struct pciback_device *pdev =
		container_of(data, struct pciback_device, op_work);
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = pciback_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = pciback_config_write(dev,
				  op->offset, op->size, op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = pciback_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = pciback_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			op->err = pciback_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = pciback_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	/* Tell the driver domain that we're done: the result (op->err,
	 * op->value) must be globally visible before PCIF_active clears. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit();	/* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit();	/* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request
	 * in between clearing _XEN_PCIF_active and clearing _PDEVF_op_active
	 * — without this recheck such a request would be lost.
	 */
	test_and_schedule_op(pdev);
}
123 | ||
124 | irqreturn_t pciback_handle_event(int irq, void *dev_id) | |
125 | { | |
126 | struct pciback_device *pdev = dev_id; | |
127 | ||
128 | test_and_schedule_op(pdev); | |
129 | ||
130 | return IRQ_HANDLED; | |
131 | } |