/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/compat.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

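/*
 * Set from the CLP List PCI response: non-zero when the platform performs
 * UID checking, i.e. function UIDs are expected to be unique.
 */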
bool zpci_unique_uid;

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;
	return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static inline int clp_req(void *data, unsigned int lps)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;
}

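/*
 * Allocate a page-aligned block of CLP_BLK_SIZE bytes used as a combined
 * request/response buffer.
 */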
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

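/*
 * Cache the function-group attributes from a CLP Query PCI Function Group
 * response in the zpci device.
 */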
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

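/* Issue CLP Query PCI Function Group for @pfgid and store the result in @zdev. */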
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

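/*
 * Cache the per-function attributes (BARs, DMA range, identifiers) from a
 * CLP Query PCI Function response in the zpci device.
 */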
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;
	zdev->fmb_length = sizeof(u32) * response->fmb_len;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

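/*
 * Query the PCI function denoted by @fh and, on success, also query its
 * function group.
 */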
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

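/*
 * Allocate a new zpci device for function @fid with handle @fh, query its
 * properties and register it. On failure the partially initialized device
 * is freed again.
 */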
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 * Retries while the function is reported busy; on success the updated
 * handle is returned in *fh.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

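/*
 * Enable the PCI function with @nr_dma_as DMA address spaces; on success
 * the new (enabled) handle is stored in zdev->fh.
 */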
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

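/*
 * Walk the list of PCI functions reported by CLP List PCI and invoke @cb
 * for each entry. The resume token is used to fetch the list in chunks
 * until the platform reports no further entries.
 */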
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		zpci_unique_uid = rrb->response.uid_checking;
		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

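/* clp_list_pci() callback: add a previously unknown function. */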
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

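/*
 * clp_list_pci() callback: add functions that are new and stop devices
 * whose function was deconfigured.
 */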
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, so no iota/irq freeing via
		 * the firmware interfaces is possible anymore. The resources
		 * (DMA memory, debug, sysfs) have to be freed manually.
		 */
		zpci_stop_device(zdev);
	}
}

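/* clp_list_pci() callback: refresh the cached function handle of a known device. */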
static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

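/* Initial scan: register a zpci device for every function the platform reports. */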
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

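/* Full rescan: add new functions and stop devices that were deconfigured. */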
int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

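/*
 * Lightweight rescan that only refreshes the cached function handles.
 * Uses GFP_NOWAIT so it can also run in contexts where blocking
 * allocations are not allowed.
 */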
int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}

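/*
 * The remainder of this file implements the "clp" misc device, which
 * forwards sanity-checked CLP requests from user space to the platform.
 */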
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

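/*
 * Copy a CLP request block from user space, validate it, issue the
 * command for the requested logical-processor set and copy the response
 * back to user space.
 */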
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}

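/*
 * Handle the immediate CLP commands (c=1), which are answered from the
 * mask of installed logical processors without building a request block.
 */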
static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}

static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);