]>
Commit | Line | Data |
---|---|---|
cd248341 JG |
1 | /* |
2 | * Copyright IBM Corp. 2012 | |
3 | * | |
4 | * Author(s): | |
5 | * Jan Glauber <jang@linux.vnet.ibm.com> | |
6 | * | |
7 | * The System z PCI code is a rewrite from a prototype by | |
8 | * the following people (Kudoz!): | |
9 | * Alexander Schmidt <alexschm@de.ibm.com> | |
10 | * Christoph Raisch <raisch@de.ibm.com> | |
11 | * Hannes Hering <hering2@de.ibm.com> | |
12 | * Hoang-Nam Nguyen <hnguyen@de.ibm.com> | |
13 | * Jan-Bernd Themann <themann@de.ibm.com> | |
14 | * Stefan Roscher <stefan.roscher@de.ibm.com> | |
15 | * Thomas Klein <tklein@de.ibm.com> | |
16 | */ | |
17 | ||
18 | #define COMPONENT "zPCI" | |
19 | #define pr_fmt(fmt) COMPONENT ": " fmt | |
20 | ||
21 | #include <linux/kernel.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/err.h> | |
24 | #include <linux/export.h> | |
25 | #include <linux/delay.h> | |
9a4da8a5 JG |
26 | #include <linux/irq.h> |
27 | #include <linux/kernel_stat.h> | |
cd248341 JG |
28 | #include <linux/seq_file.h> |
29 | #include <linux/pci.h> | |
30 | #include <linux/msi.h> | |
31 | ||
9a4da8a5 JG |
32 | #include <asm/isc.h> |
33 | #include <asm/airq.h> | |
cd248341 JG |
34 | #include <asm/facility.h> |
35 | #include <asm/pci_insn.h> | |
a755a45d | 36 | #include <asm/pci_clp.h> |
828b35f6 | 37 | #include <asm/pci_dma.h> |
cd248341 JG |
38 | |
39 | #define DEBUG /* enable pr_debug */ | |
40 | ||
9a4da8a5 JG |
41 | #define SIC_IRQ_MODE_ALL 0 |
42 | #define SIC_IRQ_MODE_SINGLE 1 | |
43 | ||
cd248341 | 44 | #define ZPCI_NR_DMA_SPACES 1 |
9a4da8a5 | 45 | #define ZPCI_MSI_VEC_BITS 6 |
cd248341 JG |
46 | #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS |
47 | ||
48 | /* list of all detected zpci devices */ | |
49 | LIST_HEAD(zpci_list); | |
50 | DEFINE_MUTEX(zpci_list_lock); | |
51 | ||
52 | static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); | |
53 | static DEFINE_SPINLOCK(zpci_domain_lock); | |
54 | ||
9a4da8a5 JG |
55 | struct callback { |
56 | irq_handler_t handler; | |
57 | void *data; | |
58 | }; | |
59 | ||
60 | struct zdev_irq_map { | |
61 | unsigned long aibv; /* AI bit vector */ | |
62 | int msi_vecs; /* consecutive MSI-vectors used */ | |
63 | int __unused; | |
64 | struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */ | |
65 | spinlock_t lock; /* protect callbacks against de-reg */ | |
66 | }; | |
67 | ||
68 | struct intr_bucket { | |
69 | /* amap of adapters, one bit per dev, corresponds to one irq nr */ | |
70 | unsigned long *alloc; | |
71 | /* AI summary bit, global page for all devices */ | |
72 | unsigned long *aisb; | |
73 | /* pointer to aibv and callback data in zdev */ | |
74 | struct zdev_irq_map *imap[ZPCI_NR_DEVICES]; | |
75 | /* protects the whole bucket struct */ | |
76 | spinlock_t lock; | |
77 | }; | |
78 | ||
79 | static struct intr_bucket *bucket; | |
80 | ||
81 | /* Adapter local summary indicator */ | |
82 | static u8 *zpci_irq_si; | |
83 | ||
84 | static atomic_t irq_retries = ATOMIC_INIT(0); | |
85 | ||
cd248341 JG |
86 | /* I/O Map */ |
87 | static DEFINE_SPINLOCK(zpci_iomap_lock); | |
88 | static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); | |
89 | struct zpci_iomap_entry *zpci_iomap_start; | |
90 | EXPORT_SYMBOL_GPL(zpci_iomap_start); | |
91 | ||
9a4da8a5 JG |
92 | /* highest irq summary bit */ |
93 | static int __read_mostly aisb_max; | |
94 | ||
95 | static struct kmem_cache *zdev_irq_cache; | |
96 | ||
97 | static inline int irq_to_msi_nr(unsigned int irq) | |
98 | { | |
99 | return irq & ZPCI_MSI_MASK; | |
100 | } | |
101 | ||
102 | static inline int irq_to_dev_nr(unsigned int irq) | |
103 | { | |
104 | return irq >> ZPCI_MSI_VEC_BITS; | |
105 | } | |
106 | ||
107 | static inline struct zdev_irq_map *get_imap(unsigned int irq) | |
108 | { | |
109 | return bucket->imap[irq_to_dev_nr(irq)]; | |
110 | } | |
111 | ||
cd248341 JG |
112 | struct zpci_dev *get_zdev(struct pci_dev *pdev) |
113 | { | |
114 | return (struct zpci_dev *) pdev->sysdata; | |
115 | } | |
116 | ||
117 | struct zpci_dev *get_zdev_by_fid(u32 fid) | |
118 | { | |
119 | struct zpci_dev *tmp, *zdev = NULL; | |
120 | ||
121 | mutex_lock(&zpci_list_lock); | |
122 | list_for_each_entry(tmp, &zpci_list, entry) { | |
123 | if (tmp->fid == fid) { | |
124 | zdev = tmp; | |
125 | break; | |
126 | } | |
127 | } | |
128 | mutex_unlock(&zpci_list_lock); | |
129 | return zdev; | |
130 | } | |
131 | ||
132 | bool zpci_fid_present(u32 fid) | |
133 | { | |
134 | return (get_zdev_by_fid(fid) != NULL) ? true : false; | |
135 | } | |
136 | ||
137 | static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) | |
138 | { | |
139 | return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; | |
140 | } | |
141 | ||
142 | int pci_domain_nr(struct pci_bus *bus) | |
143 | { | |
144 | return ((struct zpci_dev *) bus->sysdata)->domain; | |
145 | } | |
146 | EXPORT_SYMBOL_GPL(pci_domain_nr); | |
147 | ||
148 | int pci_proc_domain(struct pci_bus *bus) | |
149 | { | |
150 | return pci_domain_nr(bus); | |
151 | } | |
152 | EXPORT_SYMBOL_GPL(pci_proc_domain); | |
153 | ||
154 | /* Store PCI function information block */ | |
155 | static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc) | |
156 | { | |
157 | struct zpci_fib *fib; | |
158 | u8 status, cc; | |
159 | ||
160 | fib = (void *) get_zeroed_page(GFP_KERNEL); | |
161 | if (!fib) | |
162 | return -ENOMEM; | |
163 | ||
164 | do { | |
165 | cc = __stpcifc(zdev->fh, 0, fib, &status); | |
166 | if (cc == 2) { | |
167 | msleep(ZPCI_INSN_BUSY_DELAY); | |
168 | memset(fib, 0, PAGE_SIZE); | |
169 | } | |
170 | } while (cc == 2); | |
171 | ||
172 | if (cc) | |
173 | pr_err_once("%s: cc: %u status: %u\n", | |
174 | __func__, cc, status); | |
175 | ||
176 | /* Return PCI function controls */ | |
177 | *fc = fib->fc; | |
178 | ||
179 | free_page((unsigned long) fib); | |
180 | return (cc) ? -EIO : 0; | |
181 | } | |
182 | ||
9a4da8a5 JG |
183 | /* Modify PCI: Register adapter interruptions */ |
184 | static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, | |
185 | u64 aibv) | |
186 | { | |
187 | u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); | |
188 | struct zpci_fib *fib; | |
189 | int rc; | |
190 | ||
191 | fib = (void *) get_zeroed_page(GFP_KERNEL); | |
192 | if (!fib) | |
193 | return -ENOMEM; | |
194 | ||
195 | fib->isc = PCI_ISC; | |
196 | fib->noi = zdev->irq_map->msi_vecs; | |
197 | fib->sum = 1; /* enable summary notifications */ | |
198 | fib->aibv = aibv; | |
199 | fib->aibvo = 0; /* every function has its own page */ | |
200 | fib->aisb = (u64) bucket->aisb + aisb / 8; | |
201 | fib->aisbo = aisb & ZPCI_MSI_MASK; | |
202 | ||
203 | rc = mpcifc_instr(req, fib); | |
204 | pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); | |
205 | ||
206 | free_page((unsigned long) fib); | |
207 | return rc; | |
208 | } | |
209 | ||
210 | struct mod_pci_args { | |
211 | u64 base; | |
212 | u64 limit; | |
213 | u64 iota; | |
214 | }; | |
215 | ||
216 | static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args) | |
217 | { | |
218 | u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); | |
219 | struct zpci_fib *fib; | |
220 | int rc; | |
221 | ||
222 | /* The FIB must be available even if it's not used */ | |
223 | fib = (void *) get_zeroed_page(GFP_KERNEL); | |
224 | if (!fib) | |
225 | return -ENOMEM; | |
226 | ||
227 | fib->pba = args->base; | |
228 | fib->pal = args->limit; | |
229 | fib->iota = args->iota; | |
230 | ||
231 | rc = mpcifc_instr(req, fib); | |
232 | free_page((unsigned long) fib); | |
233 | return rc; | |
234 | } | |
235 | ||
828b35f6 JG |
236 | /* Modify PCI: Register I/O address translation parameters */ |
237 | int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas, | |
238 | u64 base, u64 limit, u64 iota) | |
239 | { | |
240 | struct mod_pci_args args = { base, limit, iota }; | |
241 | ||
242 | WARN_ON_ONCE(iota & 0x3fff); | |
243 | args.iota |= ZPCI_IOTA_RTTO_FLAG; | |
244 | return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args); | |
245 | } | |
246 | ||
247 | /* Modify PCI: Unregister I/O address translation parameters */ | |
248 | int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) | |
249 | { | |
250 | struct mod_pci_args args = { 0, 0, 0 }; | |
251 | ||
252 | return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args); | |
253 | } | |
254 | ||
9a4da8a5 JG |
255 | /* Modify PCI: Unregister adapter interruptions */ |
256 | static int zpci_unregister_airq(struct zpci_dev *zdev) | |
257 | { | |
258 | struct mod_pci_args args = { 0, 0, 0 }; | |
259 | ||
260 | return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args); | |
261 | } | |
262 | ||
cd248341 JG |
263 | #define ZPCI_PCIAS_CFGSPC 15 |
264 | ||
265 | static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) | |
266 | { | |
267 | u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len); | |
268 | u64 data; | |
269 | int rc; | |
270 | ||
271 | rc = pcilg_instr(&data, req, offset); | |
272 | data = data << ((8 - len) * 8); | |
273 | data = le64_to_cpu(data); | |
274 | if (!rc) | |
275 | *val = (u32) data; | |
276 | else | |
277 | *val = 0xffffffff; | |
278 | return rc; | |
279 | } | |
280 | ||
281 | static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) | |
282 | { | |
283 | u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len); | |
284 | u64 data = val; | |
285 | int rc; | |
286 | ||
287 | data = cpu_to_le64(data); | |
288 | data = data >> ((8 - len) * 8); | |
289 | rc = pcistg_instr(data, req, offset); | |
290 | return rc; | |
291 | } | |
292 | ||
/*
 * zPCI replaces the generic kernel irq entry points below; MSI masking is
 * done directly via the MSI mask bits instead of a per-irq chip.
 */
void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
	/* no irq threading here, so nosync is plain disable */
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

/* irq auto-probing is not supported; provide inert stubs */
unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

341 | ||
cd248341 JG |
342 | void __devinit pcibios_fixup_bus(struct pci_bus *bus) |
343 | { | |
344 | } | |
345 | ||
346 | resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |
347 | resource_size_t size, | |
348 | resource_size_t align) | |
349 | { | |
350 | return 0; | |
351 | } | |
352 | ||
353 | /* Create a virtual mapping cookie for a PCI BAR */ | |
354 | void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) | |
355 | { | |
356 | struct zpci_dev *zdev = get_zdev(pdev); | |
357 | u64 addr; | |
358 | int idx; | |
359 | ||
360 | if ((bar & 7) != bar) | |
361 | return NULL; | |
362 | ||
363 | idx = zdev->bars[bar].map_idx; | |
364 | spin_lock(&zpci_iomap_lock); | |
365 | zpci_iomap_start[idx].fh = zdev->fh; | |
366 | zpci_iomap_start[idx].bar = bar; | |
367 | spin_unlock(&zpci_iomap_lock); | |
368 | ||
369 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | |
370 | return (void __iomem *) addr; | |
371 | } | |
372 | EXPORT_SYMBOL_GPL(pci_iomap); | |
373 | ||
374 | void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | |
375 | { | |
376 | unsigned int idx; | |
377 | ||
378 | idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; | |
379 | spin_lock(&zpci_iomap_lock); | |
380 | zpci_iomap_start[idx].fh = 0; | |
381 | zpci_iomap_start[idx].bar = 0; | |
382 | spin_unlock(&zpci_iomap_lock); | |
383 | } | |
384 | EXPORT_SYMBOL_GPL(pci_iounmap); | |
385 | ||
386 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | |
387 | int size, u32 *val) | |
388 | { | |
389 | struct zpci_dev *zdev = get_zdev_by_bus(bus); | |
390 | ||
391 | if (!zdev || devfn != ZPCI_DEVFN) | |
392 | return 0; | |
393 | return zpci_cfg_load(zdev, where, val, size); | |
394 | } | |
395 | ||
396 | static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, | |
397 | int size, u32 val) | |
398 | { | |
399 | struct zpci_dev *zdev = get_zdev_by_bus(bus); | |
400 | ||
401 | if (!zdev || devfn != ZPCI_DEVFN) | |
402 | return 0; | |
403 | return zpci_cfg_store(zdev, where, val, size); | |
404 | } | |
405 | ||
406 | static struct pci_ops pci_root_ops = { | |
407 | .read = pci_read, | |
408 | .write = pci_write, | |
409 | }; | |
410 | ||
9a4da8a5 JG |
411 | /* store the last handled bit to implement fair scheduling of devices */ |
412 | static DEFINE_PER_CPU(unsigned long, next_sbit); | |
413 | ||
414 | static void zpci_irq_handler(void *dont, void *need) | |
415 | { | |
416 | unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit); | |
417 | int rescan = 0, max = aisb_max; | |
418 | struct zdev_irq_map *imap; | |
419 | ||
420 | kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++; | |
421 | sbit = start; | |
422 | ||
423 | scan: | |
424 | /* find summary_bit */ | |
425 | for_each_set_bit_left_cont(sbit, bucket->aisb, max) { | |
426 | clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6)); | |
427 | last = sbit; | |
428 | ||
429 | /* find vector bit */ | |
430 | imap = bucket->imap[sbit]; | |
431 | for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) { | |
432 | kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++; | |
433 | clear_bit(63 - mbit, &imap->aibv); | |
434 | ||
435 | spin_lock(&imap->lock); | |
436 | if (imap->cb[mbit].handler) | |
437 | imap->cb[mbit].handler(mbit, | |
438 | imap->cb[mbit].data); | |
439 | spin_unlock(&imap->lock); | |
440 | } | |
441 | } | |
442 | ||
443 | if (rescan) | |
444 | goto out; | |
445 | ||
446 | /* scan the skipped bits */ | |
447 | if (start > 0) { | |
448 | sbit = 0; | |
449 | max = start; | |
450 | start = 0; | |
451 | goto scan; | |
452 | } | |
453 | ||
454 | /* enable interrupts again */ | |
455 | sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | |
456 | ||
457 | /* check again to not lose initiative */ | |
458 | rmb(); | |
459 | max = aisb_max; | |
460 | sbit = find_first_bit_left(bucket->aisb, max); | |
461 | if (sbit != max) { | |
462 | atomic_inc(&irq_retries); | |
463 | rescan++; | |
464 | goto scan; | |
465 | } | |
466 | out: | |
467 | /* store next device bit to scan */ | |
468 | __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last; | |
469 | } | |
470 | ||
471 | /* msi_vecs - number of requested interrupts, 0 place function to error state */ | |
472 | static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs) | |
473 | { | |
474 | struct zpci_dev *zdev = get_zdev(pdev); | |
475 | unsigned int aisb, msi_nr; | |
476 | struct msi_desc *msi; | |
477 | int rc; | |
478 | ||
479 | /* store the number of used MSI vectors */ | |
480 | zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS); | |
481 | ||
482 | spin_lock(&bucket->lock); | |
483 | aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE); | |
484 | /* alloc map exhausted? */ | |
485 | if (aisb == PAGE_SIZE) { | |
486 | spin_unlock(&bucket->lock); | |
487 | return -EIO; | |
488 | } | |
489 | set_bit(aisb, bucket->alloc); | |
490 | spin_unlock(&bucket->lock); | |
491 | ||
492 | zdev->aisb = aisb; | |
493 | if (aisb + 1 > aisb_max) | |
494 | aisb_max = aisb + 1; | |
495 | ||
496 | /* wire up IRQ shortcut pointer */ | |
497 | bucket->imap[zdev->aisb] = zdev->irq_map; | |
498 | pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map); | |
499 | ||
500 | /* TODO: irq number 0 wont be found if we return less than requested MSIs. | |
501 | * ignore it for now and fix in common code. | |
502 | */ | |
503 | msi_nr = aisb << ZPCI_MSI_VEC_BITS; | |
504 | ||
505 | list_for_each_entry(msi, &pdev->msi_list, list) { | |
506 | rc = zpci_setup_msi_irq(zdev, msi, msi_nr, | |
507 | aisb << ZPCI_MSI_VEC_BITS); | |
508 | if (rc) | |
509 | return rc; | |
510 | msi_nr++; | |
511 | } | |
512 | ||
513 | rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv); | |
514 | if (rc) { | |
515 | clear_bit(aisb, bucket->alloc); | |
516 | dev_err(&pdev->dev, "register MSI failed with: %d\n", rc); | |
517 | return rc; | |
518 | } | |
519 | return (zdev->irq_map->msi_vecs == msi_vecs) ? | |
520 | 0 : zdev->irq_map->msi_vecs; | |
521 | } | |
522 | ||
523 | static void zpci_teardown_msi(struct pci_dev *pdev) | |
524 | { | |
525 | struct zpci_dev *zdev = get_zdev(pdev); | |
526 | struct msi_desc *msi; | |
527 | int aisb, rc; | |
528 | ||
529 | rc = zpci_unregister_airq(zdev); | |
530 | if (rc) { | |
531 | dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc); | |
532 | return; | |
533 | } | |
534 | ||
535 | msi = list_first_entry(&pdev->msi_list, struct msi_desc, list); | |
536 | aisb = irq_to_dev_nr(msi->irq); | |
537 | ||
538 | list_for_each_entry(msi, &pdev->msi_list, list) | |
539 | zpci_teardown_msi_irq(zdev, msi); | |
540 | ||
541 | clear_bit(aisb, bucket->alloc); | |
542 | if (aisb + 1 == aisb_max) | |
543 | aisb_max--; | |
544 | } | |
545 | ||
546 | int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |
547 | { | |
548 | pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec); | |
549 | if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) | |
550 | return -EINVAL; | |
551 | return zpci_setup_msi(pdev, nvec); | |
552 | } | |
553 | ||
554 | void arch_teardown_msi_irqs(struct pci_dev *pdev) | |
555 | { | |
556 | pr_info("%s: on pdev: %p\n", __func__, pdev); | |
557 | zpci_teardown_msi(pdev); | |
558 | } | |
559 | ||
cd248341 JG |
560 | static void zpci_map_resources(struct zpci_dev *zdev) |
561 | { | |
562 | struct pci_dev *pdev = zdev->pdev; | |
563 | resource_size_t len; | |
564 | int i; | |
565 | ||
566 | for (i = 0; i < PCI_BAR_COUNT; i++) { | |
567 | len = pci_resource_len(pdev, i); | |
568 | if (!len) | |
569 | continue; | |
570 | pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); | |
571 | pdev->resource[i].end = pdev->resource[i].start + len - 1; | |
572 | pr_debug("BAR%i: -> start: %Lx end: %Lx\n", | |
573 | i, pdev->resource[i].start, pdev->resource[i].end); | |
574 | } | |
575 | }; | |
576 | ||
577 | static void zpci_unmap_resources(struct pci_dev *pdev) | |
578 | { | |
579 | resource_size_t len; | |
580 | int i; | |
581 | ||
582 | for (i = 0; i < PCI_BAR_COUNT; i++) { | |
583 | len = pci_resource_len(pdev, i); | |
584 | if (!len) | |
585 | continue; | |
586 | pci_iounmap(pdev, (void *) pdev->resource[i].start); | |
587 | } | |
588 | }; | |
589 | ||
590 | struct zpci_dev *zpci_alloc_device(void) | |
591 | { | |
592 | struct zpci_dev *zdev; | |
593 | ||
594 | /* Alloc memory for our private pci device data */ | |
595 | zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); | |
596 | if (!zdev) | |
597 | return ERR_PTR(-ENOMEM); | |
9a4da8a5 JG |
598 | |
599 | /* Alloc aibv & callback space */ | |
600 | zdev->irq_map = kmem_cache_alloc(zdev_irq_cache, GFP_KERNEL); | |
601 | if (!zdev->irq_map) | |
602 | goto error; | |
603 | memset(zdev->irq_map, 0, sizeof(*zdev->irq_map)); | |
604 | WARN_ON((u64) zdev->irq_map & 0xff); | |
cd248341 | 605 | return zdev; |
9a4da8a5 JG |
606 | |
607 | error: | |
608 | kfree(zdev); | |
609 | return ERR_PTR(-ENOMEM); | |
cd248341 JG |
610 | } |
611 | ||
612 | void zpci_free_device(struct zpci_dev *zdev) | |
613 | { | |
9a4da8a5 | 614 | kmem_cache_free(zdev_irq_cache, zdev->irq_map); |
cd248341 JG |
615 | kfree(zdev); |
616 | } | |
617 | ||
618 | /* Called on removal of pci_dev, leaves zpci and bus device */ | |
619 | static void zpci_remove_device(struct pci_dev *pdev) | |
620 | { | |
621 | struct zpci_dev *zdev = get_zdev(pdev); | |
622 | ||
623 | dev_info(&pdev->dev, "Removing device %u\n", zdev->domain); | |
624 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | |
828b35f6 | 625 | zpci_dma_exit_device(zdev); |
cd248341 JG |
626 | zpci_unmap_resources(pdev); |
627 | list_del(&zdev->entry); /* can be called from init */ | |
628 | zdev->pdev = NULL; | |
629 | } | |
630 | ||
631 | static void zpci_scan_devices(void) | |
632 | { | |
633 | struct zpci_dev *zdev; | |
634 | ||
635 | mutex_lock(&zpci_list_lock); | |
636 | list_for_each_entry(zdev, &zpci_list, entry) | |
637 | if (zdev->state == ZPCI_FN_STATE_CONFIGURED) | |
638 | zpci_scan_device(zdev); | |
639 | mutex_unlock(&zpci_list_lock); | |
640 | } | |
641 | ||
642 | /* | |
643 | * Too late for any s390 specific setup, since interrupts must be set up | |
644 | * already which requires DMA setup too and the pci scan will access the | |
645 | * config space, which only works if the function handle is enabled. | |
646 | */ | |
647 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | |
648 | { | |
649 | struct resource *res; | |
650 | u16 cmd; | |
651 | int i; | |
652 | ||
653 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | |
654 | ||
655 | for (i = 0; i < PCI_BAR_COUNT; i++) { | |
656 | res = &pdev->resource[i]; | |
657 | ||
658 | if (res->flags & IORESOURCE_IO) | |
659 | return -EINVAL; | |
660 | ||
661 | if (res->flags & IORESOURCE_MEM) | |
662 | cmd |= PCI_COMMAND_MEMORY; | |
663 | } | |
664 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | |
665 | return 0; | |
666 | } | |
667 | ||
668 | void pcibios_disable_device(struct pci_dev *pdev) | |
669 | { | |
670 | zpci_remove_device(pdev); | |
671 | pdev->sysdata = NULL; | |
672 | } | |
673 | ||
9a4da8a5 JG |
674 | int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data) |
675 | { | |
676 | int msi_nr = irq_to_msi_nr(irq); | |
677 | struct zdev_irq_map *imap; | |
678 | struct msi_desc *msi; | |
679 | ||
680 | msi = irq_get_msi_desc(irq); | |
681 | if (!msi) | |
682 | return -EIO; | |
683 | ||
684 | imap = get_imap(irq); | |
685 | spin_lock_init(&imap->lock); | |
686 | ||
687 | pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr); | |
688 | imap->cb[msi_nr].handler = handler; | |
689 | imap->cb[msi_nr].data = data; | |
690 | ||
691 | /* | |
692 | * The generic MSI code returns with the interrupt disabled on the | |
693 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | |
694 | * at that level, so we do it here by hand. | |
695 | */ | |
696 | zpci_msi_set_mask_bits(msi, 1, 0); | |
697 | return 0; | |
698 | } | |
699 | ||
700 | void zpci_free_irq(unsigned int irq) | |
701 | { | |
702 | struct zdev_irq_map *imap = get_imap(irq); | |
703 | int msi_nr = irq_to_msi_nr(irq); | |
704 | unsigned long flags; | |
705 | ||
706 | pr_debug("%s: for irq: %d\n", __func__, irq); | |
707 | ||
708 | spin_lock_irqsave(&imap->lock, flags); | |
709 | imap->cb[msi_nr].handler = NULL; | |
710 | imap->cb[msi_nr].data = NULL; | |
711 | spin_unlock_irqrestore(&imap->lock, flags); | |
712 | } | |
713 | ||
714 | int request_irq(unsigned int irq, irq_handler_t handler, | |
715 | unsigned long irqflags, const char *devname, void *dev_id) | |
716 | { | |
717 | pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n", | |
718 | __func__, irq, handler, irqflags, devname); | |
719 | ||
720 | return zpci_request_irq(irq, handler, dev_id); | |
721 | } | |
722 | EXPORT_SYMBOL_GPL(request_irq); | |
723 | ||
724 | void free_irq(unsigned int irq, void *dev_id) | |
725 | { | |
726 | zpci_free_irq(irq); | |
727 | } | |
728 | EXPORT_SYMBOL_GPL(free_irq); | |
729 | ||
730 | static int __init zpci_irq_init(void) | |
731 | { | |
732 | int cpu, rc; | |
733 | ||
734 | bucket = kzalloc(sizeof(*bucket), GFP_KERNEL); | |
735 | if (!bucket) | |
736 | return -ENOMEM; | |
737 | ||
738 | bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL); | |
739 | if (!bucket->aisb) { | |
740 | rc = -ENOMEM; | |
741 | goto out_aisb; | |
742 | } | |
743 | ||
744 | bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL); | |
745 | if (!bucket->alloc) { | |
746 | rc = -ENOMEM; | |
747 | goto out_alloc; | |
748 | } | |
749 | ||
750 | isc_register(PCI_ISC); | |
751 | zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC); | |
752 | if (IS_ERR(zpci_irq_si)) { | |
753 | rc = PTR_ERR(zpci_irq_si); | |
754 | zpci_irq_si = NULL; | |
755 | goto out_ai; | |
756 | } | |
757 | ||
758 | for_each_online_cpu(cpu) | |
759 | per_cpu(next_sbit, cpu) = 0; | |
760 | ||
761 | spin_lock_init(&bucket->lock); | |
762 | /* set summary to 1 to be called every time for the ISC */ | |
763 | *zpci_irq_si = 1; | |
764 | sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | |
765 | return 0; | |
766 | ||
767 | out_ai: | |
768 | isc_unregister(PCI_ISC); | |
769 | free_page((unsigned long) bucket->alloc); | |
770 | out_alloc: | |
771 | free_page((unsigned long) bucket->aisb); | |
772 | out_aisb: | |
773 | kfree(bucket); | |
774 | return rc; | |
775 | } | |
776 | ||
777 | static void zpci_irq_exit(void) | |
778 | { | |
779 | free_page((unsigned long) bucket->alloc); | |
780 | free_page((unsigned long) bucket->aisb); | |
781 | s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC); | |
782 | isc_unregister(PCI_ISC); | |
783 | kfree(bucket); | |
784 | } | |
785 | ||
cd248341 JG |
786 | static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, |
787 | unsigned long flags, int domain) | |
788 | { | |
789 | struct resource *r; | |
790 | char *name; | |
791 | int rc; | |
792 | ||
793 | r = kzalloc(sizeof(*r), GFP_KERNEL); | |
794 | if (!r) | |
795 | return ERR_PTR(-ENOMEM); | |
796 | r->start = start; | |
797 | r->end = r->start + size - 1; | |
798 | r->flags = flags; | |
799 | r->parent = &iomem_resource; | |
800 | name = kmalloc(18, GFP_KERNEL); | |
801 | if (!name) { | |
802 | kfree(r); | |
803 | return ERR_PTR(-ENOMEM); | |
804 | } | |
805 | sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR); | |
806 | r->name = name; | |
807 | ||
808 | rc = request_resource(&iomem_resource, r); | |
809 | if (rc) | |
810 | pr_debug("request resource %pR failed\n", r); | |
811 | return r; | |
812 | } | |
813 | ||
814 | static int zpci_alloc_iomap(struct zpci_dev *zdev) | |
815 | { | |
816 | int entry; | |
817 | ||
818 | spin_lock(&zpci_iomap_lock); | |
819 | entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); | |
820 | if (entry == ZPCI_IOMAP_MAX_ENTRIES) { | |
821 | spin_unlock(&zpci_iomap_lock); | |
822 | return -ENOSPC; | |
823 | } | |
824 | set_bit(entry, zpci_iomap); | |
825 | spin_unlock(&zpci_iomap_lock); | |
826 | return entry; | |
827 | } | |
828 | ||
829 | static void zpci_free_iomap(struct zpci_dev *zdev, int entry) | |
830 | { | |
831 | spin_lock(&zpci_iomap_lock); | |
832 | memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry)); | |
833 | clear_bit(entry, zpci_iomap); | |
834 | spin_unlock(&zpci_iomap_lock); | |
835 | } | |
836 | ||
837 | static int zpci_create_device_bus(struct zpci_dev *zdev) | |
838 | { | |
839 | struct resource *res; | |
840 | LIST_HEAD(resources); | |
841 | int i; | |
842 | ||
843 | /* allocate mapping entry for each used bar */ | |
844 | for (i = 0; i < PCI_BAR_COUNT; i++) { | |
845 | unsigned long addr, size, flags; | |
846 | int entry; | |
847 | ||
848 | if (!zdev->bars[i].size) | |
849 | continue; | |
850 | entry = zpci_alloc_iomap(zdev); | |
851 | if (entry < 0) | |
852 | return entry; | |
853 | zdev->bars[i].map_idx = entry; | |
854 | ||
855 | /* only MMIO is supported */ | |
856 | flags = IORESOURCE_MEM; | |
857 | if (zdev->bars[i].val & 8) | |
858 | flags |= IORESOURCE_PREFETCH; | |
859 | if (zdev->bars[i].val & 4) | |
860 | flags |= IORESOURCE_MEM_64; | |
861 | ||
862 | addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48); | |
863 | ||
864 | size = 1UL << zdev->bars[i].size; | |
865 | ||
866 | res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain); | |
867 | if (IS_ERR(res)) { | |
868 | zpci_free_iomap(zdev, entry); | |
869 | return PTR_ERR(res); | |
870 | } | |
871 | pci_add_resource(&resources, res); | |
872 | } | |
873 | ||
874 | zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, | |
875 | zdev, &resources); | |
876 | if (!zdev->bus) | |
877 | return -EIO; | |
878 | ||
879 | zdev->bus->max_bus_speed = zdev->max_bus_speed; | |
880 | return 0; | |
881 | } | |
882 | ||
883 | static int zpci_alloc_domain(struct zpci_dev *zdev) | |
884 | { | |
885 | spin_lock(&zpci_domain_lock); | |
886 | zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); | |
887 | if (zdev->domain == ZPCI_NR_DEVICES) { | |
888 | spin_unlock(&zpci_domain_lock); | |
889 | return -ENOSPC; | |
890 | } | |
891 | set_bit(zdev->domain, zpci_domain); | |
892 | spin_unlock(&zpci_domain_lock); | |
893 | return 0; | |
894 | } | |
895 | ||
896 | static void zpci_free_domain(struct zpci_dev *zdev) | |
897 | { | |
898 | spin_lock(&zpci_domain_lock); | |
899 | clear_bit(zdev->domain, zpci_domain); | |
900 | spin_unlock(&zpci_domain_lock); | |
901 | } | |
902 | ||
a755a45d JG |
903 | int zpci_enable_device(struct zpci_dev *zdev) |
904 | { | |
905 | int rc; | |
906 | ||
907 | rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES); | |
908 | if (rc) | |
909 | goto out; | |
910 | pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid); | |
828b35f6 JG |
911 | |
912 | rc = zpci_dma_init_device(zdev); | |
913 | if (rc) | |
914 | goto out_dma; | |
a755a45d | 915 | return 0; |
828b35f6 JG |
916 | |
917 | out_dma: | |
918 | clp_disable_fh(zdev); | |
a755a45d JG |
919 | out: |
920 | return rc; | |
921 | } | |
922 | EXPORT_SYMBOL_GPL(zpci_enable_device); | |
923 | ||
cd248341 JG |
924 | int zpci_create_device(struct zpci_dev *zdev) |
925 | { | |
926 | int rc; | |
927 | ||
928 | rc = zpci_alloc_domain(zdev); | |
929 | if (rc) | |
930 | goto out; | |
931 | ||
932 | rc = zpci_create_device_bus(zdev); | |
933 | if (rc) | |
934 | goto out_bus; | |
935 | ||
936 | mutex_lock(&zpci_list_lock); | |
937 | list_add_tail(&zdev->entry, &zpci_list); | |
938 | mutex_unlock(&zpci_list_lock); | |
939 | ||
940 | if (zdev->state == ZPCI_FN_STATE_STANDBY) | |
941 | return 0; | |
942 | ||
a755a45d JG |
943 | rc = zpci_enable_device(zdev); |
944 | if (rc) | |
945 | goto out_start; | |
cd248341 JG |
946 | return 0; |
947 | ||
a755a45d JG |
948 | out_start: |
949 | mutex_lock(&zpci_list_lock); | |
950 | list_del(&zdev->entry); | |
951 | mutex_unlock(&zpci_list_lock); | |
cd248341 JG |
952 | out_bus: |
953 | zpci_free_domain(zdev); | |
954 | out: | |
955 | return rc; | |
956 | } | |
957 | ||
958 | void zpci_stop_device(struct zpci_dev *zdev) | |
959 | { | |
828b35f6 | 960 | zpci_dma_exit_device(zdev); |
cd248341 JG |
961 | /* |
962 | * Note: SCLP disables fh via set-pci-fn so don't | |
963 | * do that here. | |
964 | */ | |
965 | } | |
966 | EXPORT_SYMBOL_GPL(zpci_stop_device); | |
967 | ||
968 | int zpci_scan_device(struct zpci_dev *zdev) | |
969 | { | |
970 | zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN); | |
971 | if (!zdev->pdev) { | |
972 | pr_err("pci_scan_single_device failed for fid: 0x%x\n", | |
973 | zdev->fid); | |
974 | goto out; | |
975 | } | |
976 | ||
977 | zpci_map_resources(zdev); | |
978 | pci_bus_add_devices(zdev->bus); | |
979 | ||
980 | /* now that pdev was added to the bus mark it as used */ | |
981 | zdev->state = ZPCI_FN_STATE_ONLINE; | |
982 | return 0; | |
983 | ||
984 | out: | |
828b35f6 | 985 | zpci_dma_exit_device(zdev); |
a755a45d | 986 | clp_disable_fh(zdev); |
cd248341 JG |
987 | return -EIO; |
988 | } | |
989 | EXPORT_SYMBOL_GPL(zpci_scan_device); | |
990 | ||
991 | static inline int barsize(u8 size) | |
992 | { | |
993 | return (size) ? (1 << size) >> 10 : 0; | |
994 | } | |
995 | ||
996 | static int zpci_mem_init(void) | |
997 | { | |
9a4da8a5 JG |
998 | zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map), |
999 | L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL); | |
1000 | if (!zdev_irq_cache) | |
1001 | goto error_zdev; | |
1002 | ||
cd248341 JG |
1003 | /* TODO: use realloc */ |
1004 | zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), | |
1005 | GFP_KERNEL); | |
1006 | if (!zpci_iomap_start) | |
9a4da8a5 | 1007 | goto error_iomap; |
cd248341 JG |
1008 | return 0; |
1009 | ||
9a4da8a5 JG |
1010 | error_iomap: |
1011 | kmem_cache_destroy(zdev_irq_cache); | |
cd248341 JG |
1012 | error_zdev: |
1013 | return -ENOMEM; | |
1014 | } | |
1015 | ||
1016 | static void zpci_mem_exit(void) | |
1017 | { | |
1018 | kfree(zpci_iomap_start); | |
9a4da8a5 | 1019 | kmem_cache_destroy(zdev_irq_cache); |
cd248341 JG |
1020 | } |
1021 | ||
1022 | unsigned int pci_probe = 1; | |
1023 | EXPORT_SYMBOL_GPL(pci_probe); | |
1024 | ||
1025 | char * __init pcibios_setup(char *str) | |
1026 | { | |
1027 | if (!strcmp(str, "off")) { | |
1028 | pci_probe = 0; | |
1029 | return NULL; | |
1030 | } | |
1031 | return str; | |
1032 | } | |
1033 | ||
1034 | static int __init pci_base_init(void) | |
1035 | { | |
1036 | int rc; | |
1037 | ||
1038 | if (!pci_probe) | |
1039 | return 0; | |
1040 | ||
1041 | if (!test_facility(2) || !test_facility(69) | |
1042 | || !test_facility(71) || !test_facility(72)) | |
1043 | return 0; | |
1044 | ||
1045 | pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n", | |
1046 | test_facility(69), test_facility(70), | |
1047 | test_facility(71)); | |
1048 | ||
1049 | rc = zpci_mem_init(); | |
1050 | if (rc) | |
1051 | goto out_mem; | |
1052 | ||
9a4da8a5 JG |
1053 | rc = zpci_msihash_init(); |
1054 | if (rc) | |
1055 | goto out_hash; | |
1056 | ||
1057 | rc = zpci_irq_init(); | |
1058 | if (rc) | |
1059 | goto out_irq; | |
1060 | ||
828b35f6 JG |
1061 | rc = zpci_dma_init(); |
1062 | if (rc) | |
1063 | goto out_dma; | |
1064 | ||
a755a45d JG |
1065 | rc = clp_find_pci_devices(); |
1066 | if (rc) | |
1067 | goto out_find; | |
1068 | ||
cd248341 JG |
1069 | zpci_scan_devices(); |
1070 | return 0; | |
1071 | ||
a755a45d | 1072 | out_find: |
828b35f6 JG |
1073 | zpci_dma_exit(); |
1074 | out_dma: | |
9a4da8a5 JG |
1075 | zpci_irq_exit(); |
1076 | out_irq: | |
1077 | zpci_msihash_exit(); | |
1078 | out_hash: | |
cd248341 JG |
1079 | zpci_mem_exit(); |
1080 | out_mem: | |
1081 | return rc; | |
1082 | } | |
1083 | subsys_initcall(pci_base_init); |