/* arch/s390/pci/pci_clp.c */
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/delay.h>
15#include <linux/pci.h>
16#include <asm/pci_clp.h>
17
/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 *
 * Executes the CLP instruction (.insn rrf, opcode 0xb9a00000) on the
 * request/response block at @req and returns the condition code, which
 * is extracted from the PSW via ipm/srl.
 * NOTE(review): the instruction's second output (ilpm) is received but
 * not used by any caller in this file.
 */
static inline u8 clp_instr(void *req)
{
	u64 ilpm;
	u8 cc;

	asm volatile (
		"	.insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
		"	ipm %[cc]\n"
		"	srl %[cc],28\n"
		: [cc] "=d" (cc), [ilpm] "=d" (ilpm)
		: [req] "a" (req)
		: "cc", "memory");
	return cc;
}
36
37static void *clp_alloc_block(void)
38{
39 struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
40 return (page) ? page_address(page) : NULL;
41}
42
43static void clp_free_block(void *ptr)
44{
45 free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
46}
47
48static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
49 struct clp_rsp_query_pci_grp *response)
50{
9a4da8a5
JG
51 zdev->msi_addr = response->msia;
52
53 pr_debug("Supported number of MSI vectors: %u\n", response->noi);
a755a45d
JG
54 switch (response->version) {
55 case 1:
56 zdev->max_bus_speed = PCIE_SPEED_5_0GT;
57 break;
58 default:
59 zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
60 break;
61 }
62}
63
64static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
65{
66 struct clp_req_rsp_query_pci_grp *rrb;
67 int rc;
68
69 rrb = clp_alloc_block();
70 if (!rrb)
71 return -ENOMEM;
72
73 memset(rrb, 0, sizeof(*rrb));
74 rrb->request.hdr.len = sizeof(rrb->request);
75 rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
76 rrb->response.hdr.len = sizeof(rrb->response);
77 rrb->request.pfgid = pfgid;
78
79 rc = clp_instr(rrb);
80 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
81 clp_store_query_pci_fngrp(zdev, &rrb->response);
82 else {
83 pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
84 rrb->response.hdr.rsp, rc);
85 rc = -EIO;
86 }
87 clp_free_block(rrb);
88 return rc;
89}
90
91static int clp_store_query_pci_fn(struct zpci_dev *zdev,
92 struct clp_rsp_query_pci *response)
93{
94 int i;
95
96 for (i = 0; i < PCI_BAR_COUNT; i++) {
97 zdev->bars[i].val = le32_to_cpu(response->bar[i]);
98 zdev->bars[i].size = response->bar_size[i];
99 }
100 zdev->pchid = response->pchid;
101 zdev->pfgid = response->pfgid;
102 return 0;
103}
104
105static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
106{
107 struct clp_req_rsp_query_pci *rrb;
108 int rc;
109
110 rrb = clp_alloc_block();
111 if (!rrb)
112 return -ENOMEM;
113
114 memset(rrb, 0, sizeof(*rrb));
115 rrb->request.hdr.len = sizeof(rrb->request);
116 rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
117 rrb->response.hdr.len = sizeof(rrb->response);
118 rrb->request.fh = fh;
119
120 rc = clp_instr(rrb);
121 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
122 rc = clp_store_query_pci_fn(zdev, &rrb->response);
123 if (rc)
124 goto out;
125 if (rrb->response.pfgid)
126 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
127 } else {
128 pr_err("Query PCI failed with response: %x cc: %d\n",
129 rrb->response.hdr.rsp, rc);
130 rc = -EIO;
131 }
132out:
133 clp_free_block(rrb);
134 return rc;
135}
136
137int clp_add_pci_device(u32 fid, u32 fh, int configured)
138{
139 struct zpci_dev *zdev;
140 int rc;
141
142 zdev = zpci_alloc_device();
143 if (IS_ERR(zdev))
144 return PTR_ERR(zdev);
145
146 zdev->fh = fh;
147 zdev->fid = fid;
148
149 /* Query function properties and update zdev */
150 rc = clp_query_pci_fn(zdev, fh);
151 if (rc)
152 goto error;
153
154 if (configured)
155 zdev->state = ZPCI_FN_STATE_CONFIGURED;
156 else
157 zdev->state = ZPCI_FN_STATE_STANDBY;
158
159 rc = zpci_create_device(zdev);
160 if (rc)
161 goto error;
162 return 0;
163
164error:
165 zpci_free_device(zdev);
166 return rc;
167}
168
169/*
170 * Enable/Disable a given PCI function defined by its function handle.
171 */
172static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
173{
174 struct clp_req_rsp_set_pci *rrb;
175 int rc, retries = 1000;
176
177 rrb = clp_alloc_block();
178 if (!rrb)
179 return -ENOMEM;
180
181 do {
182 memset(rrb, 0, sizeof(*rrb));
183 rrb->request.hdr.len = sizeof(rrb->request);
184 rrb->request.hdr.cmd = CLP_SET_PCI_FN;
185 rrb->response.hdr.len = sizeof(rrb->response);
186 rrb->request.fh = *fh;
187 rrb->request.oc = command;
188 rrb->request.ndas = nr_dma_as;
189
190 rc = clp_instr(rrb);
191 if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
192 retries--;
193 if (retries < 0)
194 break;
195 msleep(1);
196 }
197 } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
198
199 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
200 *fh = rrb->response.fh;
201 else {
202 pr_err("Set PCI FN failed with response: %x cc: %d\n",
203 rrb->response.hdr.rsp, rc);
204 rc = -EIO;
205 }
206 clp_free_block(rrb);
207 return rc;
208}
209
210int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
211{
212 u32 fh = zdev->fh;
213 int rc;
214
215 rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
216 if (!rc)
217 /* Success -> store enabled handle in zdev */
218 zdev->fh = fh;
219 return rc;
220}
221
222int clp_disable_fh(struct zpci_dev *zdev)
223{
224 u32 fh = zdev->fh;
225 int rc;
226
227 if (!zdev_enabled(zdev))
228 return 0;
229
230 dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
231 rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
232 if (!rc)
233 /* Success -> store disabled handle in zdev */
234 zdev->fh = fh;
235 else
236 dev_err(&zdev->pdev->dev,
237 "Failed to disable fn handle: 0x%x\n", fh);
238 return rc;
239}
240
241static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
242{
243 int present, rc;
244
245 if (!entry->vendor_id)
246 return;
247
248 /* TODO: be a little bit more scalable */
249 present = zpci_fid_present(entry->fid);
250
251 if (present)
252 pr_debug("%s: device %x already present\n", __func__, entry->fid);
253
254 /* skip already used functions */
255 if (present && entry->config_state)
256 return;
257
258 /* aev 306: function moved to stand-by state */
259 if (present && !entry->config_state) {
260 /*
261 * The handle is already disabled, that means no iota/irq freeing via
262 * the firmware interfaces anymore. Need to free resources manually
263 * (DMA memory, debug, sysfs)...
264 */
265 zpci_stop_device(get_zdev_by_fid(entry->fid));
266 return;
267 }
268
269 rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
270 if (rc)
271 pr_err("Failed to add fid: 0x%x\n", entry->fid);
272}
273
/*
 * Scan the complete PCI function list reported by firmware and feed
 * every entry to clp_check_pcifn_entry(). The list is fetched in
 * CLP_BLK_SIZE chunks; a non-zero resume token in the response means
 * more entries follow and triggers another CLP_LIST_PCI call.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
int clp_find_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	u64 resume_token = 0;
	int entries, i, rc;

	rrb = clp_alloc_block();
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			pr_err("List PCI failed with response: 0x%x cc: %d\n",
				rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		/* Firmware and our headers must agree on the entry layout. */
		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		/*
		 * NOTE(review): assumes firmware updates response.hdr.len to
		 * cover exactly the entries it stored — confirm against the
		 * CLP_LIST_PCI architecture description.
		 */
		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;
		pr_info("Detected number of PCI functions: %u\n", entries);

		/* Store the returned resume token as input for the next call */
		resume_token = rrb->response.resume_token;

		for (i = 0; i < entries; i++)
			clp_check_pcifn_entry(&rrb->response.fh_list[i]);
	} while (resume_token);

	pr_debug("Maximum number of supported PCI functions: %u\n",
		rrb->response.max_fn);
out:
	clp_free_block(rrb);
	return rc;
}