1 /*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 *
7 * The System z PCI code is a rewrite from a prototype by
8 * the following people (Kudoz!):
9 * Alexander Schmidt <alexschm@de.ibm.com>
10 * Christoph Raisch <raisch@de.ibm.com>
11 * Hannes Hering <hering2@de.ibm.com>
12 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
13 * Jan-Bernd Themann <themann@de.ibm.com>
14 * Stefan Roscher <stefan.roscher@de.ibm.com>
15 * Thomas Klein <tklein@de.ibm.com>
16 */
17
18 #define COMPONENT "zPCI"
19 #define pr_fmt(fmt) COMPONENT ": " fmt
20
21 #include <linux/kernel.h>
22 #include <linux/slab.h>
23 #include <linux/err.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/seq_file.h>
29 #include <linux/pci.h>
30 #include <linux/msi.h>
31
32 #include <asm/isc.h>
33 #include <asm/airq.h>
34 #include <asm/facility.h>
35 #include <asm/pci_insn.h>
36 #include <asm/pci_clp.h>
37 #include <asm/pci_dma.h>
38
39 #define DEBUG /* enable pr_debug */
40
41 #define SIC_IRQ_MODE_ALL 0
42 #define SIC_IRQ_MODE_SINGLE 1
43
44 #define ZPCI_NR_DMA_SPACES 1
45 #define ZPCI_MSI_VEC_BITS 6
46 #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
47
48 /* list of all detected zpci devices */
49 LIST_HEAD(zpci_list);
50 EXPORT_SYMBOL_GPL(zpci_list);
51 DEFINE_MUTEX(zpci_list_lock);
52 EXPORT_SYMBOL_GPL(zpci_list_lock);
53
54 struct pci_hp_callback_ops hotplug_ops;
55 EXPORT_SYMBOL_GPL(hotplug_ops);
56
57 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
58 static DEFINE_SPINLOCK(zpci_domain_lock);
59
60 struct callback {
61 irq_handler_t handler;
62 void *data;
63 };
64
65 struct zdev_irq_map {
66 unsigned long aibv; /* AI bit vector */
67 int msi_vecs; /* consecutive MSI-vectors used */
68 int __unused;
69 struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
70 spinlock_t lock; /* protect callbacks against de-reg */
71 };
72
73 struct intr_bucket {
74 /* adapter bitmap, one bit per device; each bit corresponds to one irq nr */
75 unsigned long *alloc;
76 /* AI summary bit, global page for all devices */
77 unsigned long *aisb;
78 /* pointer to aibv and callback data in zdev */
79 struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
80 /* protects the whole bucket struct */
81 spinlock_t lock;
82 };
83
84 static struct intr_bucket *bucket;
85
86 /* Adapter local summary indicator */
87 static u8 *zpci_irq_si;
88
89 static atomic_t irq_retries = ATOMIC_INIT(0);
90
91 /* I/O Map */
92 static DEFINE_SPINLOCK(zpci_iomap_lock);
93 static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
94 struct zpci_iomap_entry *zpci_iomap_start;
95 EXPORT_SYMBOL_GPL(zpci_iomap_start);
96
97 /* highest irq summary bit */
98 static int __read_mostly aisb_max;
99
100 static struct kmem_cache *zdev_irq_cache;
101
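/*
 * MSI interrupt numbers encode the device (summary bit) index in the
 * upper bits and the MSI vector number in the lower ZPCI_MSI_VEC_BITS
 * bits; the helpers below split an irq number back into these parts.
 */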
102 static inline int irq_to_msi_nr(unsigned int irq)
103 {
104 return irq & ZPCI_MSI_MASK;
105 }
106
107 static inline int irq_to_dev_nr(unsigned int irq)
108 {
109 return irq >> ZPCI_MSI_VEC_BITS;
110 }
111
112 static inline struct zdev_irq_map *get_imap(unsigned int irq)
113 {
114 return bucket->imap[irq_to_dev_nr(irq)];
115 }
116
117 struct zpci_dev *get_zdev(struct pci_dev *pdev)
118 {
119 return (struct zpci_dev *) pdev->sysdata;
120 }
121
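/* Find the zpci_dev for a function ID; returns NULL if the FID is unknown */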
122 struct zpci_dev *get_zdev_by_fid(u32 fid)
123 {
124 struct zpci_dev *tmp, *zdev = NULL;
125
126 mutex_lock(&zpci_list_lock);
127 list_for_each_entry(tmp, &zpci_list, entry) {
128 if (tmp->fid == fid) {
129 zdev = tmp;
130 break;
131 }
132 }
133 mutex_unlock(&zpci_list_lock);
134 return zdev;
135 }
136
137 bool zpci_fid_present(u32 fid)
138 {
139 return get_zdev_by_fid(fid) != NULL;
140 }
141
142 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
143 {
144 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
145 }
146
147 int pci_domain_nr(struct pci_bus *bus)
148 {
149 return ((struct zpci_dev *) bus->sysdata)->domain;
150 }
151 EXPORT_SYMBOL_GPL(pci_domain_nr);
152
153 int pci_proc_domain(struct pci_bus *bus)
154 {
155 return pci_domain_nr(bus);
156 }
157 EXPORT_SYMBOL_GPL(pci_proc_domain);
158
159 /* Store PCI function information block */
160 static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
161 {
162 struct zpci_fib *fib;
163 u8 status, cc;
164
165 fib = (void *) get_zeroed_page(GFP_KERNEL);
166 if (!fib)
167 return -ENOMEM;
168
169 do {
170 cc = __stpcifc(zdev->fh, 0, fib, &status);
171 if (cc == 2) {
172 msleep(ZPCI_INSN_BUSY_DELAY);
173 memset(fib, 0, PAGE_SIZE);
174 }
175 } while (cc == 2);
176
177 if (cc)
178 pr_err_once("%s: cc: %u status: %u\n",
179 __func__, cc, status);
180
181 /* Return PCI function controls */
182 *fc = fib->fc;
183
184 free_page((unsigned long) fib);
185 return (cc) ? -EIO : 0;
186 }
187
188 /* Modify PCI: Register adapter interruptions */
189 static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
190 u64 aibv)
191 {
192 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
193 struct zpci_fib *fib;
194 int rc;
195
196 fib = (void *) get_zeroed_page(GFP_KERNEL);
197 if (!fib)
198 return -ENOMEM;
199
200 fib->isc = PCI_ISC;
201 fib->noi = zdev->irq_map->msi_vecs;
202 fib->sum = 1; /* enable summary notifications */
203 fib->aibv = aibv;
204 fib->aibvo = 0; /* every function has its own page */
205 fib->aisb = (u64) bucket->aisb + aisb / 8;
206 fib->aisbo = aisb & ZPCI_MSI_MASK;
207
208 rc = mpcifc_instr(req, fib);
209 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
210
211 free_page((unsigned long) fib);
212 return rc;
213 }
214
215 struct mod_pci_args {
216 u64 base;
217 u64 limit;
218 u64 iota;
219 };
220
221 static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
222 {
223 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
224 struct zpci_fib *fib;
225 int rc;
226
227 /* The FIB must be available even if it's not used */
228 fib = (void *) get_zeroed_page(GFP_KERNEL);
229 if (!fib)
230 return -ENOMEM;
231
232 fib->pba = args->base;
233 fib->pal = args->limit;
234 fib->iota = args->iota;
235
236 rc = mpcifc_instr(req, fib);
237 free_page((unsigned long) fib);
238 return rc;
239 }
240
241 /* Modify PCI: Register I/O address translation parameters */
242 int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
243 u64 base, u64 limit, u64 iota)
244 {
245 struct mod_pci_args args = { base, limit, iota };
246
247 WARN_ON_ONCE(iota & 0x3fff);
248 args.iota |= ZPCI_IOTA_RTTO_FLAG;
249 return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
250 }
251
252 /* Modify PCI: Unregister I/O address translation parameters */
253 int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
254 {
255 struct mod_pci_args args = { 0, 0, 0 };
256
257 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
258 }
259
260 /* Modify PCI: Unregister adapter interruptions */
261 static int zpci_unregister_airq(struct zpci_dev *zdev)
262 {
263 struct mod_pci_args args = { 0, 0, 0 };
264
265 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
266 }
267
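/*
 * PCI address space 15 selects the config space for the PCI load/store
 * instructions; the helpers below shift the data into the byte lanes
 * expected for the given access length.
 */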
268 #define ZPCI_PCIAS_CFGSPC 15
269
270 static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
271 {
272 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
273 u64 data;
274 int rc;
275
276 rc = pcilg_instr(&data, req, offset);
277 data = data << ((8 - len) * 8);
278 data = le64_to_cpu(data);
279 if (!rc)
280 *val = (u32) data;
281 else
282 *val = 0xffffffff;
283 return rc;
284 }
285
286 static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
287 {
288 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
289 u64 data = val;
290 int rc;
291
292 data = cpu_to_le64(data);
293 data = data >> ((8 - len) * 8);
294 rc = pcistg_instr(data, req, offset);
295 return rc;
296 }
297
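/*
 * The generic kernel IRQ infrastructure is not used for PCI on s390;
 * the stubs below provide the expected entry points and operate on the
 * MSI mask bits directly where masking is required.
 */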
298 void synchronize_irq(unsigned int irq)
299 {
300 /*
301 * Not needed, the handler is protected by a lock and IRQs that occur
302 * after the handler is deleted are just NOPs.
303 */
304 }
305 EXPORT_SYMBOL_GPL(synchronize_irq);
306
307 void enable_irq(unsigned int irq)
308 {
309 struct msi_desc *msi = irq_get_msi_desc(irq);
310
311 zpci_msi_set_mask_bits(msi, 1, 0);
312 }
313 EXPORT_SYMBOL_GPL(enable_irq);
314
315 void disable_irq(unsigned int irq)
316 {
317 struct msi_desc *msi = irq_get_msi_desc(irq);
318
319 zpci_msi_set_mask_bits(msi, 1, 1);
320 }
321 EXPORT_SYMBOL_GPL(disable_irq);
322
323 void disable_irq_nosync(unsigned int irq)
324 {
325 disable_irq(irq);
326 }
327 EXPORT_SYMBOL_GPL(disable_irq_nosync);
328
329 unsigned long probe_irq_on(void)
330 {
331 return 0;
332 }
333 EXPORT_SYMBOL_GPL(probe_irq_on);
334
335 int probe_irq_off(unsigned long val)
336 {
337 return 0;
338 }
339 EXPORT_SYMBOL_GPL(probe_irq_off);
340
341 unsigned int probe_irq_mask(unsigned long val)
342 {
343 return val;
344 }
345 EXPORT_SYMBOL_GPL(probe_irq_mask);
346
347 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
348 {
349 }
350
351 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
352 resource_size_t size,
353 resource_size_t align)
354 {
355 return 0;
356 }
357
358 /* Create a virtual mapping cookie for a PCI BAR */
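/*
 * The returned cookie is not a CPU mapping: the index into
 * zpci_iomap_start is encoded as (index << 48) on top of
 * ZPCI_IOMAP_ADDR_BASE, and pci_iounmap() decodes it the same way.
 */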
359 void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
360 {
361 struct zpci_dev *zdev = get_zdev(pdev);
362 u64 addr;
363 int idx;
364
365 if ((bar & 7) != bar)
366 return NULL;
367
368 idx = zdev->bars[bar].map_idx;
369 spin_lock(&zpci_iomap_lock);
370 zpci_iomap_start[idx].fh = zdev->fh;
371 zpci_iomap_start[idx].bar = bar;
372 spin_unlock(&zpci_iomap_lock);
373
374 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
375 return (void __iomem *) addr;
376 }
377 EXPORT_SYMBOL_GPL(pci_iomap);
378
379 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
380 {
381 unsigned int idx;
382
383 idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
384 spin_lock(&zpci_iomap_lock);
385 zpci_iomap_start[idx].fh = 0;
386 zpci_iomap_start[idx].bar = 0;
387 spin_unlock(&zpci_iomap_lock);
388 }
389 EXPORT_SYMBOL_GPL(pci_iounmap);
390
391 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
392 int size, u32 *val)
393 {
394 struct zpci_dev *zdev = get_zdev_by_bus(bus);
395
396 if (!zdev || devfn != ZPCI_DEVFN)
397 return 0;
398 return zpci_cfg_load(zdev, where, val, size);
399 }
400
401 static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
402 int size, u32 val)
403 {
404 struct zpci_dev *zdev = get_zdev_by_bus(bus);
405
406 if (!zdev || devfn != ZPCI_DEVFN)
407 return 0;
408 return zpci_cfg_store(zdev, where, val, size);
409 }
410
411 static struct pci_ops pci_root_ops = {
412 .read = pci_read,
413 .write = pci_write,
414 };
415
416 /* store the last handled bit to implement fair scheduling of devices */
417 static DEFINE_PER_CPU(unsigned long, next_sbit);
418
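/*
 * Adapter interrupt handler: scan the summary bit vector (aisb) starting
 * at the per-cpu position saved last time, then walk the per-device
 * vector (aibv) and call the registered callback for every pending MSI.
 * The summary bits are re-checked once after re-enabling interrupts so
 * that no initiative is lost.
 */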
419 static void zpci_irq_handler(void *dont, void *need)
420 {
421 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
422 int rescan = 0, max = aisb_max;
423 struct zdev_irq_map *imap;
424
425 kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
426 sbit = start;
427
428 scan:
429 /* find summary_bit */
430 for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
431 clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
432 last = sbit;
433
434 /* find vector bit */
435 imap = bucket->imap[sbit];
436 for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
437 kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
438 clear_bit(63 - mbit, &imap->aibv);
439
440 spin_lock(&imap->lock);
441 if (imap->cb[mbit].handler)
442 imap->cb[mbit].handler(mbit,
443 imap->cb[mbit].data);
444 spin_unlock(&imap->lock);
445 }
446 }
447
448 if (rescan)
449 goto out;
450
451 /* scan the skipped bits */
452 if (start > 0) {
453 sbit = 0;
454 max = start;
455 start = 0;
456 goto scan;
457 }
458
459 /* enable interrupts again */
460 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
461
462 /* check again to not lose initiative */
463 rmb();
464 max = aisb_max;
465 sbit = find_first_bit_left(bucket->aisb, max);
466 if (sbit != max) {
467 atomic_inc(&irq_retries);
468 rescan++;
469 goto scan;
470 }
471 out:
472 /* store next device bit to scan */
473 __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
474 }
475
476 /* msi_vecs - number of requested interrupts; 0 places the function into the error state */
477 static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
478 {
479 struct zpci_dev *zdev = get_zdev(pdev);
480 unsigned int aisb, msi_nr;
481 struct msi_desc *msi;
482 int rc;
483
484 /* store the number of used MSI vectors */
485 zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
486
487 spin_lock(&bucket->lock);
488 aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
489 /* alloc map exhausted? */
490 if (aisb == PAGE_SIZE) {
491 spin_unlock(&bucket->lock);
492 return -EIO;
493 }
494 set_bit(aisb, bucket->alloc);
495 spin_unlock(&bucket->lock);
496
497 zdev->aisb = aisb;
498 if (aisb + 1 > aisb_max)
499 aisb_max = aisb + 1;
500
501 /* wire up IRQ shortcut pointer */
502 bucket->imap[zdev->aisb] = zdev->irq_map;
503 pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
504
505 /* TODO: irq number 0 won't be found if we return fewer MSIs than requested.
506  * Ignore it for now and fix in common code.
507 */
508 msi_nr = aisb << ZPCI_MSI_VEC_BITS;
509
510 list_for_each_entry(msi, &pdev->msi_list, list) {
511 rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
512 aisb << ZPCI_MSI_VEC_BITS);
513 if (rc)
514 return rc;
515 msi_nr++;
516 }
517
518 rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
519 if (rc) {
520 clear_bit(aisb, bucket->alloc);
521 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
522 return rc;
523 }
524 return (zdev->irq_map->msi_vecs == msi_vecs) ?
525 0 : zdev->irq_map->msi_vecs;
526 }
527
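/* Undo zpci_setup_msi(): deregister the adapter interrupt and release the summary bit */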
528 static void zpci_teardown_msi(struct pci_dev *pdev)
529 {
530 struct zpci_dev *zdev = get_zdev(pdev);
531 struct msi_desc *msi;
532 int aisb, rc;
533
534 rc = zpci_unregister_airq(zdev);
535 if (rc) {
536 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
537 return;
538 }
539
540 msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
541 aisb = irq_to_dev_nr(msi->irq);
542
543 list_for_each_entry(msi, &pdev->msi_list, list)
544 zpci_teardown_msi_irq(zdev, msi);
545
546 clear_bit(aisb, bucket->alloc);
547 if (aisb + 1 == aisb_max)
548 aisb_max--;
549 }
550
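/* arch hooks called by the generic MSI code for both MSI and MSI-X */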
551 int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
552 {
553 pr_debug("%s: requesting %d MSI interrupts...\n", __func__, nvec);
554 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
555 return -EINVAL;
556 return zpci_setup_msi(pdev, nvec);
557 }
558
559 void arch_teardown_msi_irqs(struct pci_dev *pdev)
560 {
561 pr_info("%s: on pdev: %p\n", __func__, pdev);
562 zpci_teardown_msi(pdev);
563 }
564
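/*
 * Fill the pci_dev BAR resources with the pci_iomap() cookies for all
 * BARs that have a non-zero size.
 */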
565 static void zpci_map_resources(struct zpci_dev *zdev)
566 {
567 struct pci_dev *pdev = zdev->pdev;
568 resource_size_t len;
569 int i;
570
571 for (i = 0; i < PCI_BAR_COUNT; i++) {
572 len = pci_resource_len(pdev, i);
573 if (!len)
574 continue;
575 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
576 pdev->resource[i].end = pdev->resource[i].start + len - 1;
577 pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
578 i, pdev->resource[i].start, pdev->resource[i].end);
579 }
580 }
581
582 static void zpci_unmap_resources(struct pci_dev *pdev)
583 {
584 resource_size_t len;
585 int i;
586
587 for (i = 0; i < PCI_BAR_COUNT; i++) {
588 len = pci_resource_len(pdev, i);
589 if (!len)
590 continue;
591 pci_iounmap(pdev, (void *) pdev->resource[i].start);
592 }
593 }
594
595 struct zpci_dev *zpci_alloc_device(void)
596 {
597 struct zpci_dev *zdev;
598
599 /* Alloc memory for our private pci device data */
600 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
601 if (!zdev)
602 return ERR_PTR(-ENOMEM);
603
604 /* Alloc aibv & callback space */
605 zdev->irq_map = kmem_cache_alloc(zdev_irq_cache, GFP_KERNEL);
606 if (!zdev->irq_map)
607 goto error;
608 memset(zdev->irq_map, 0, sizeof(*zdev->irq_map));
609 WARN_ON((u64) zdev->irq_map & 0xff);
610 return zdev;
611
612 error:
613 kfree(zdev);
614 return ERR_PTR(-ENOMEM);
615 }
616
617 void zpci_free_device(struct zpci_dev *zdev)
618 {
619 kmem_cache_free(zdev_irq_cache, zdev->irq_map);
620 kfree(zdev);
621 }
622
623 /* Called on removal of the pci_dev; leaves the zpci and bus devices in place */
624 static void zpci_remove_device(struct pci_dev *pdev)
625 {
626 struct zpci_dev *zdev = get_zdev(pdev);
627
628 dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
629 zdev->state = ZPCI_FN_STATE_CONFIGURED;
630 zpci_dma_exit_device(zdev);
631 zpci_sysfs_remove_device(&pdev->dev);
632 zpci_unmap_resources(pdev);
633 list_del(&zdev->entry); /* can be called from init */
634 zdev->pdev = NULL;
635 }
636
637 static void zpci_scan_devices(void)
638 {
639 struct zpci_dev *zdev;
640
641 mutex_lock(&zpci_list_lock);
642 list_for_each_entry(zdev, &zpci_list, entry)
643 if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
644 zpci_scan_device(zdev);
645 mutex_unlock(&zpci_list_lock);
646 }
647
648 /*
649  * Too late for any s390 specific setup: interrupts must already be set
650  * up, which in turn requires DMA setup, and the PCI scan will access the
651  * config space, which only works if the function handle is enabled.
652 */
653 int pcibios_enable_device(struct pci_dev *pdev, int mask)
654 {
655 struct resource *res;
656 u16 cmd;
657 int i;
658
659 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
660
661 for (i = 0; i < PCI_BAR_COUNT; i++) {
662 res = &pdev->resource[i];
663
664 if (res->flags & IORESOURCE_IO)
665 return -EINVAL;
666
667 if (res->flags & IORESOURCE_MEM)
668 cmd |= PCI_COMMAND_MEMORY;
669 }
670 pci_write_config_word(pdev, PCI_COMMAND, cmd);
671 return 0;
672 }
673
674 void pcibios_disable_device(struct pci_dev *pdev)
675 {
676 zpci_remove_device(pdev);
677 pdev->sysdata = NULL;
678 }
679
680 int pcibios_add_platform_entries(struct pci_dev *pdev)
681 {
682 return zpci_sysfs_add_device(&pdev->dev);
683 }
684
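/*
 * Register a handler for a single MSI vector: remember handler and data
 * in the per-device callback array and unmask the vector, since the
 * generic MSI code leaves it masked.
 */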
685 int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
686 {
687 int msi_nr = irq_to_msi_nr(irq);
688 struct zdev_irq_map *imap;
689 struct msi_desc *msi;
690
691 msi = irq_get_msi_desc(irq);
692 if (!msi)
693 return -EIO;
694
695 imap = get_imap(irq);
696 spin_lock_init(&imap->lock);
697
698 pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq_to_dev_nr(irq), msi_nr);
699 imap->cb[msi_nr].handler = handler;
700 imap->cb[msi_nr].data = data;
701
702 /*
703 * The generic MSI code returns with the interrupt disabled on the
704 * card, using the MSI mask bits. Firmware doesn't appear to unmask
705 * at that level, so we do it here by hand.
706 */
707 zpci_msi_set_mask_bits(msi, 1, 0);
708 return 0;
709 }
710
711 void zpci_free_irq(unsigned int irq)
712 {
713 struct zdev_irq_map *imap = get_imap(irq);
714 int msi_nr = irq_to_msi_nr(irq);
715 unsigned long flags;
716
717 pr_debug("%s: for irq: %d\n", __func__, irq);
718
719 spin_lock_irqsave(&imap->lock, flags);
720 imap->cb[msi_nr].handler = NULL;
721 imap->cb[msi_nr].data = NULL;
722 spin_unlock_irqrestore(&imap->lock, flags);
723 }
724
725 int request_irq(unsigned int irq, irq_handler_t handler,
726 unsigned long irqflags, const char *devname, void *dev_id)
727 {
728 pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
729 __func__, irq, handler, irqflags, devname);
730
731 return zpci_request_irq(irq, handler, dev_id);
732 }
733 EXPORT_SYMBOL_GPL(request_irq);
734
735 void free_irq(unsigned int irq, void *dev_id)
736 {
737 zpci_free_irq(irq);
738 }
739 EXPORT_SYMBOL_GPL(free_irq);
740
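/*
 * Set up adapter interrupt handling: allocate the summary bit vector and
 * the allocation bitmap, register zpci_irq_handler() as adapter interrupt
 * handler for PCI_ISC and enable the ISC via the SIC instruction.
 */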
741 static int __init zpci_irq_init(void)
742 {
743 int cpu, rc;
744
745 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
746 if (!bucket)
747 return -ENOMEM;
748
749 bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
750 if (!bucket->aisb) {
751 rc = -ENOMEM;
752 goto out_aisb;
753 }
754
755 bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
756 if (!bucket->alloc) {
757 rc = -ENOMEM;
758 goto out_alloc;
759 }
760
761 isc_register(PCI_ISC);
762 zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
763 if (IS_ERR(zpci_irq_si)) {
764 rc = PTR_ERR(zpci_irq_si);
765 zpci_irq_si = NULL;
766 goto out_ai;
767 }
768
769 for_each_online_cpu(cpu)
770 per_cpu(next_sbit, cpu) = 0;
771
772 spin_lock_init(&bucket->lock);
773 /* set summary to 1 to be called every time for the ISC */
774 *zpci_irq_si = 1;
775 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
776 return 0;
777
778 out_ai:
779 isc_unregister(PCI_ISC);
780 free_page((unsigned long) bucket->alloc);
781 out_alloc:
782 free_page((unsigned long) bucket->aisb);
783 out_aisb:
784 kfree(bucket);
785 return rc;
786 }
787
788 static void zpci_irq_exit(void)
789 {
790 free_page((unsigned long) bucket->alloc);
791 free_page((unsigned long) bucket->aisb);
792 s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
793 isc_unregister(PCI_ISC);
794 kfree(bucket);
795 }
796
797 static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
798 unsigned long flags, int domain)
799 {
800 struct resource *r;
801 char *name;
802 int rc;
803
804 r = kzalloc(sizeof(*r), GFP_KERNEL);
805 if (!r)
806 return ERR_PTR(-ENOMEM);
807 r->start = start;
808 r->end = r->start + size - 1;
809 r->flags = flags;
810 r->parent = &iomem_resource;
811 name = kmalloc(18, GFP_KERNEL);
812 if (!name) {
813 kfree(r);
814 return ERR_PTR(-ENOMEM);
815 }
816 sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
817 r->name = name;
818
819 rc = request_resource(&iomem_resource, r);
820 if (rc)
821 pr_debug("request resource %pR failed\n", r);
822 return r;
823 }
824
825 static int zpci_alloc_iomap(struct zpci_dev *zdev)
826 {
827 int entry;
828
829 spin_lock(&zpci_iomap_lock);
830 entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
831 if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
832 spin_unlock(&zpci_iomap_lock);
833 return -ENOSPC;
834 }
835 set_bit(entry, zpci_iomap);
836 spin_unlock(&zpci_iomap_lock);
837 return entry;
838 }
839
840 static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
841 {
842 spin_lock(&zpci_iomap_lock);
843 memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
844 clear_bit(entry, zpci_iomap);
845 spin_unlock(&zpci_iomap_lock);
846 }
847
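/*
 * Create the root bus for a function: allocate an iomap entry and an
 * iomem resource for every implemented BAR and hand them to the PCI core.
 */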
848 static int zpci_create_device_bus(struct zpci_dev *zdev)
849 {
850 struct resource *res;
851 LIST_HEAD(resources);
852 int i;
853
854 /* allocate mapping entry for each used bar */
855 for (i = 0; i < PCI_BAR_COUNT; i++) {
856 unsigned long addr, size, flags;
857 int entry;
858
859 if (!zdev->bars[i].size)
860 continue;
861 entry = zpci_alloc_iomap(zdev);
862 if (entry < 0)
863 return entry;
864 zdev->bars[i].map_idx = entry;
865
866 /* only MMIO is supported */
867 flags = IORESOURCE_MEM;
868 if (zdev->bars[i].val & 8)
869 flags |= IORESOURCE_PREFETCH;
870 if (zdev->bars[i].val & 4)
871 flags |= IORESOURCE_MEM_64;
872
873 addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
874
875 size = 1UL << zdev->bars[i].size;
876
877 res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
878 if (IS_ERR(res)) {
879 zpci_free_iomap(zdev, entry);
880 return PTR_ERR(res);
881 }
882 pci_add_resource(&resources, res);
883 }
884
885 zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
886 zdev, &resources);
887 if (!zdev->bus)
888 return -EIO;
889
890 zdev->bus->max_bus_speed = zdev->max_bus_speed;
891 return 0;
892 }
893
894 static int zpci_alloc_domain(struct zpci_dev *zdev)
895 {
896 spin_lock(&zpci_domain_lock);
897 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
898 if (zdev->domain == ZPCI_NR_DEVICES) {
899 spin_unlock(&zpci_domain_lock);
900 return -ENOSPC;
901 }
902 set_bit(zdev->domain, zpci_domain);
903 spin_unlock(&zpci_domain_lock);
904 return 0;
905 }
906
907 static void zpci_free_domain(struct zpci_dev *zdev)
908 {
909 spin_lock(&zpci_domain_lock);
910 clear_bit(zdev->domain, zpci_domain);
911 spin_unlock(&zpci_domain_lock);
912 }
913
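/* Enable the function handle via CLP and set up DMA translation for the device */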
914 int zpci_enable_device(struct zpci_dev *zdev)
915 {
916 int rc;
917
918 rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
919 if (rc)
920 goto out;
921 pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
922
923 rc = zpci_dma_init_device(zdev);
924 if (rc)
925 goto out_dma;
926 return 0;
927
928 out_dma:
929 clp_disable_fh(zdev);
930 out:
931 return rc;
932 }
933 EXPORT_SYMBOL_GPL(zpci_enable_device);
934
935 int zpci_create_device(struct zpci_dev *zdev)
936 {
937 int rc;
938
939 rc = zpci_alloc_domain(zdev);
940 if (rc)
941 goto out;
942
943 rc = zpci_create_device_bus(zdev);
944 if (rc)
945 goto out_bus;
946
947 mutex_lock(&zpci_list_lock);
948 list_add_tail(&zdev->entry, &zpci_list);
949 if (hotplug_ops.create_slot)
950 hotplug_ops.create_slot(zdev);
951 mutex_unlock(&zpci_list_lock);
952
953 if (zdev->state == ZPCI_FN_STATE_STANDBY)
954 return 0;
955
956 rc = zpci_enable_device(zdev);
957 if (rc)
958 goto out_start;
959 return 0;
960
961 out_start:
962 mutex_lock(&zpci_list_lock);
963 list_del(&zdev->entry);
964 if (hotplug_ops.remove_slot)
965 hotplug_ops.remove_slot(zdev);
966 mutex_unlock(&zpci_list_lock);
967 out_bus:
968 zpci_free_domain(zdev);
969 out:
970 return rc;
971 }
972
973 void zpci_stop_device(struct zpci_dev *zdev)
974 {
975 zpci_dma_exit_device(zdev);
976 /*
977 * Note: SCLP disables fh via set-pci-fn so don't
978 * do that here.
979 */
980 }
981 EXPORT_SYMBOL_GPL(zpci_stop_device);
982
983 int zpci_scan_device(struct zpci_dev *zdev)
984 {
985 zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
986 if (!zdev->pdev) {
987 pr_err("pci_scan_single_device failed for fid: 0x%x\n",
988 zdev->fid);
989 goto out;
990 }
991
992 zpci_map_resources(zdev);
993 pci_bus_add_devices(zdev->bus);
994
995 /* now that the pdev has been added to the bus, mark the function as online */
996 zdev->state = ZPCI_FN_STATE_ONLINE;
997 return 0;
998
999 out:
1000 zpci_dma_exit_device(zdev);
1001 clp_disable_fh(zdev);
1002 return -EIO;
1003 }
1004 EXPORT_SYMBOL_GPL(zpci_scan_device);
1005
1006 static inline int barsize(u8 size)
1007 {
1008 return (size) ? (1 << size) >> 10 : 0;
1009 }
1010
1011 static int zpci_mem_init(void)
1012 {
1013 zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
1014 L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
1015 if (!zdev_irq_cache)
1016 goto error_zdev;
1017
1018 /* TODO: use realloc */
1019 zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
1020 GFP_KERNEL);
1021 if (!zpci_iomap_start)
1022 goto error_iomap;
1023 return 0;
1024
1025 error_iomap:
1026 kmem_cache_destroy(zdev_irq_cache);
1027 error_zdev:
1028 return -ENOMEM;
1029 }
1030
1031 static void zpci_mem_exit(void)
1032 {
1033 kfree(zpci_iomap_start);
1034 kmem_cache_destroy(zdev_irq_cache);
1035 }
1036
1037 unsigned int pci_probe = 1;
1038 EXPORT_SYMBOL_GPL(pci_probe);
1039
1040 char * __init pcibios_setup(char *str)
1041 {
1042 if (!strcmp(str, "off")) {
1043 pci_probe = 0;
1044 return NULL;
1045 }
1046 return str;
1047 }
1048
1049 static int __init pci_base_init(void)
1050 {
1051 int rc;
1052
1053 if (!pci_probe)
1054 return 0;
1055
1056 if (!test_facility(2) || !test_facility(69)
1057 || !test_facility(71) || !test_facility(72))
1058 return 0;
1059
1060 pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
1061 test_facility(69), test_facility(70),
1062 test_facility(71));
1063
1064 rc = zpci_mem_init();
1065 if (rc)
1066 goto out_mem;
1067
1068 rc = zpci_msihash_init();
1069 if (rc)
1070 goto out_hash;
1071
1072 rc = zpci_irq_init();
1073 if (rc)
1074 goto out_irq;
1075
1076 rc = zpci_dma_init();
1077 if (rc)
1078 goto out_dma;
1079
1080 rc = clp_find_pci_devices();
1081 if (rc)
1082 goto out_find;
1083
1084 zpci_scan_devices();
1085 return 0;
1086
1087 out_find:
1088 zpci_dma_exit();
1089 out_dma:
1090 zpci_irq_exit();
1091 out_irq:
1092 zpci_msihash_exit();
1093 out_hash:
1094 zpci_mem_exit();
1095 out_mem:
1096 return rc;
1097 }
1098 subsys_initcall(pci_base_init);