/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */
9 | #include <linux/types.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/pci.h> | |
12 | #include <asm/sn/sn_sal.h> | |
13 | #include <asm/sn/addrs.h> | |
14 | #include <asm/sn/pcidev.h> | |
15 | #include <asm/sn/pcibus_provider_defs.h> | |
16 | #include <asm/sn/tioce_provider.h> | |
17 | ||
18 | /** | |
19 | * Bus address ranges for the 5 flavors of TIOCE DMA | |
20 | */ | |
21 | ||
22 | #define TIOCE_D64_MIN 0x8000000000000000UL | |
23 | #define TIOCE_D64_MAX 0xffffffffffffffffUL | |
24 | #define TIOCE_D64_ADDR(a) ((a) >= TIOCE_D64_MIN) | |
25 | ||
26 | #define TIOCE_D32_MIN 0x0000000080000000UL | |
27 | #define TIOCE_D32_MAX 0x00000000ffffffffUL | |
28 | #define TIOCE_D32_ADDR(a) ((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX) | |
29 | ||
30 | #define TIOCE_M32_MIN 0x0000000000000000UL | |
31 | #define TIOCE_M32_MAX 0x000000007fffffffUL | |
32 | #define TIOCE_M32_ADDR(a) ((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX) | |
33 | ||
34 | #define TIOCE_M40_MIN 0x0000004000000000UL | |
35 | #define TIOCE_M40_MAX 0x0000007fffffffffUL | |
36 | #define TIOCE_M40_ADDR(a) ((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX) | |
37 | ||
38 | #define TIOCE_M40S_MIN 0x0000008000000000UL | |
39 | #define TIOCE_M40S_MAX 0x000000ffffffffffUL | |
40 | #define TIOCE_M40S_ADDR(a) ((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX) | |
41 | ||
42 | /* | |
43 | * ATE manipulation macros. | |
44 | */ | |
45 | ||
46 | #define ATE_PAGESHIFT(ps) (__ffs(ps)) | |
47 | #define ATE_PAGEMASK(ps) ((ps)-1) | |
48 | ||
49 | #define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps)) | |
50 | #define ATE_NPAGES(start, len, pagesize) \ | |
51 | (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) | |
52 | ||
53 | #define ATE_VALID(ate) ((ate) & (1UL << 63)) | |
54 | #define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) | |
55 | ||
56 | /* | |
57 | * Flavors of ate-based mapping supported by tioce_alloc_map() | |
58 | */ | |
59 | ||
60 | #define TIOCE_ATE_M32 1 | |
61 | #define TIOCE_ATE_M40 2 | |
62 | #define TIOCE_ATE_M40S 3 | |
63 | ||
64 | #define KB(x) ((x) << 10) | |
65 | #define MB(x) ((x) << 20) | |
66 | #define GB(x) ((x) << 30) | |
67 | ||
68 | /** | |
69 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode | |
70 | * @ct_addr: system coretalk address | |
71 | * | |
72 | * Map @ct_addr into 64-bit CE bus space. No device context is necessary | |
73 | * and no CE mapping are consumed. | |
74 | * | |
75 | * Bits 53:0 come from the coretalk address. The remaining bits are set as | |
76 | * follows: | |
77 | * | |
78 | * 63 - must be 1 to indicate d64 mode to CE hardware | |
79 | * 62 - barrier bit ... controlled with tioce_dma_barrier() | |
80 | * 61 - 0 since this is not an MSI transaction | |
81 | * 60:54 - reserved, MBZ | |
82 | */ | |
83 | static uint64_t | |
84 | tioce_dma_d64(unsigned long ct_addr) | |
85 | { | |
86 | uint64_t bus_addr; | |
87 | ||
88 | bus_addr = ct_addr | (1UL << 63); | |
89 | ||
90 | return bus_addr; | |
91 | } | |
92 | ||
93 | /** | |
94 | * pcidev_to_tioce - return misc ce related pointers given a pci_dev | |
95 | * @pci_dev: pci device context | |
96 | * @base: ptr to store struct tioce_mmr * for the CE holding this device | |
97 | * @kernel: ptr to store struct tioce_kernel * for the CE holding this device | |
98 | * @port: ptr to store the CE port number that this device is on | |
99 | * | |
100 | * Return pointers to various CE-related structures for the CE upstream of | |
101 | * @pci_dev. | |
102 | */ | |
103 | static inline void | |
104 | pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, | |
105 | struct tioce_kernel **kernel, int *port) | |
106 | { | |
107 | struct pcidev_info *pcidev_info; | |
108 | struct tioce_common *ce_common; | |
109 | struct tioce_kernel *ce_kernel; | |
110 | ||
111 | pcidev_info = SN_PCIDEV_INFO(pdev); | |
112 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | |
113 | ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private; | |
114 | ||
115 | if (base) | |
116 | *base = (struct tioce *)ce_common->ce_pcibus.bs_base; | |
117 | if (kernel) | |
118 | *kernel = ce_kernel; | |
119 | ||
120 | /* | |
121 | * we use port as a zero-based value internally, even though the | |
122 | * documentation is 1-based. | |
123 | */ | |
124 | if (port) | |
125 | *port = | |
126 | (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1; | |
127 | } | |
128 | ||
129 | /** | |
130 | * tioce_alloc_map - Given a coretalk address, map it to pcie bus address | |
131 | * space using one of the various ATE-based address modes. | |
132 | * @ce_kern: tioce context | |
133 | * @type: map mode to use | |
134 | * @port: 0-based port that the requesting device is downstream of | |
135 | * @ct_addr: the coretalk address to map | |
136 | * @len: number of bytes to map | |
137 | * | |
138 | * Given the addressing type, set up various paramaters that define the | |
139 | * ATE pool to use. Search for a contiguous block of entries to cover the | |
140 | * length, and if enough resources exist, fill in the ATE's and construct a | |
141 | * tioce_dmamap struct to track the mapping. | |
142 | */ | |
143 | static uint64_t | |
144 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |
145 | uint64_t ct_addr, int len) | |
146 | { | |
147 | int i; | |
148 | int j; | |
149 | int first; | |
150 | int last; | |
151 | int entries; | |
152 | int nates; | |
153 | int pagesize; | |
154 | uint64_t *ate_shadow; | |
155 | uint64_t *ate_reg; | |
156 | uint64_t addr; | |
157 | struct tioce *ce_mmr; | |
158 | uint64_t bus_base; | |
159 | struct tioce_dmamap *map; | |
160 | ||
161 | ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; | |
162 | ||
163 | switch (type) { | |
164 | case TIOCE_ATE_M32: | |
165 | /* | |
166 | * The first 64 entries of the ate3240 pool are dedicated to | |
167 | * super-page (TIOCE_ATE_M40S) mode. | |
168 | */ | |
169 | first = 64; | |
170 | entries = TIOCE_NUM_M3240_ATES - 64; | |
171 | ate_shadow = ce_kern->ce_ate3240_shadow; | |
172 | ate_reg = ce_mmr->ce_ure_ate3240; | |
173 | pagesize = ce_kern->ce_ate3240_pagesize; | |
174 | bus_base = TIOCE_M32_MIN; | |
175 | break; | |
176 | case TIOCE_ATE_M40: | |
177 | first = 0; | |
178 | entries = TIOCE_NUM_M40_ATES; | |
179 | ate_shadow = ce_kern->ce_ate40_shadow; | |
180 | ate_reg = ce_mmr->ce_ure_ate40; | |
181 | pagesize = MB(64); | |
182 | bus_base = TIOCE_M40_MIN; | |
183 | break; | |
184 | case TIOCE_ATE_M40S: | |
185 | /* | |
186 | * ate3240 entries 0-31 are dedicated to port1 super-page | |
187 | * mappings. ate3240 entries 32-63 are dedicated to port2. | |
188 | */ | |
189 | first = port * 32; | |
190 | entries = 32; | |
191 | ate_shadow = ce_kern->ce_ate3240_shadow; | |
192 | ate_reg = ce_mmr->ce_ure_ate3240; | |
193 | pagesize = GB(16); | |
194 | bus_base = TIOCE_M40S_MIN; | |
195 | break; | |
196 | default: | |
197 | return 0; | |
198 | } | |
199 | ||
200 | nates = ATE_NPAGES(ct_addr, len, pagesize); | |
201 | if (nates > entries) | |
202 | return 0; | |
203 | ||
204 | last = first + entries - nates; | |
205 | for (i = first; i <= last; i++) { | |
206 | if (ATE_VALID(ate_shadow[i])) | |
207 | continue; | |
208 | ||
209 | for (j = i; j < i + nates; j++) | |
210 | if (ATE_VALID(ate_shadow[j])) | |
211 | break; | |
212 | ||
213 | if (j >= i + nates) | |
214 | break; | |
215 | } | |
216 | ||
217 | if (i > last) | |
218 | return 0; | |
219 | ||
220 | map = kcalloc(1, sizeof(struct tioce_dmamap), GFP_ATOMIC); | |
221 | if (!map) | |
222 | return 0; | |
223 | ||
224 | addr = ct_addr; | |
225 | for (j = 0; j < nates; j++) { | |
226 | uint64_t ate; | |
227 | ||
228 | ate = ATE_MAKE(addr, pagesize); | |
229 | ate_shadow[i + j] = ate; | |
230 | ate_reg[i + j] = ate; | |
231 | addr += pagesize; | |
232 | } | |
233 | ||
234 | map->refcnt = 1; | |
235 | map->nbytes = nates * pagesize; | |
236 | map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize); | |
237 | map->pci_start = bus_base + (i * pagesize); | |
238 | map->ate_hw = &ate_reg[i]; | |
239 | map->ate_shadow = &ate_shadow[i]; | |
240 | map->ate_count = nates; | |
241 | ||
242 | list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list); | |
243 | ||
244 | return (map->pci_start + (ct_addr - map->ct_start)); | |
245 | } | |
246 | ||
247 | /** | |
248 | * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode | |
249 | * @pdev: linux pci_dev representing the function | |
250 | * @paddr: system physical address | |
251 | * | |
252 | * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. | |
253 | */ | |
254 | static uint64_t | |
255 | tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | |
256 | { | |
257 | int dma_ok; | |
258 | int port; | |
259 | struct tioce *ce_mmr; | |
260 | struct tioce_kernel *ce_kern; | |
261 | uint64_t ct_upper; | |
262 | uint64_t ct_lower; | |
263 | dma_addr_t bus_addr; | |
264 | ||
265 | ct_upper = ct_addr & ~0x3fffffffUL; | |
266 | ct_lower = ct_addr & 0x3fffffffUL; | |
267 | ||
268 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | |
269 | ||
270 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { | |
271 | volatile uint64_t tmp; | |
272 | ||
273 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; | |
274 | ce_mmr->ce_ure_dir_map[port] = ct_upper; | |
275 | tmp = ce_mmr->ce_ure_dir_map[port]; | |
276 | dma_ok = 1; | |
277 | } else | |
278 | dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper); | |
279 | ||
280 | if (dma_ok) { | |
281 | ce_kern->ce_port[port].dirmap_refcnt++; | |
282 | bus_addr = TIOCE_D32_MIN + ct_lower; | |
283 | } else | |
284 | bus_addr = 0; | |
285 | ||
286 | return bus_addr; | |
287 | } | |
288 | ||
289 | /** | |
290 | * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude | |
291 | * the barrier bit. | |
292 | * @bus_addr: bus address to swizzle | |
293 | * | |
294 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier | |
295 | * attributes. | |
296 | */ | |
297 | static uint64_t | |
298 | tioce_dma_barrier(uint64_t bus_addr, int on) | |
299 | { | |
300 | uint64_t barrier_bit; | |
301 | ||
302 | /* barrier not supported in M40/M40S mode */ | |
303 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) | |
304 | return bus_addr; | |
305 | ||
306 | if (TIOCE_D64_ADDR(bus_addr)) | |
307 | barrier_bit = (1UL << 62); | |
308 | else /* must be m32 or d32 */ | |
309 | barrier_bit = (1UL << 30); | |
310 | ||
311 | return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit); | |
312 | } | |
313 | ||
314 | /** | |
315 | * tioce_dma_unmap - release CE mapping resources | |
316 | * @pdev: linux pci_dev representing the function | |
317 | * @bus_addr: bus address returned by an earlier tioce_dma_map | |
318 | * @dir: mapping direction (unused) | |
319 | * | |
320 | * Locate mapping resources associated with @bus_addr and release them. | |
321 | * For mappings created using the direct modes there are no resources | |
322 | * to release. | |
323 | */ | |
324 | void | |
325 | tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |
326 | { | |
327 | int i; | |
328 | int port; | |
329 | struct tioce_kernel *ce_kern; | |
330 | struct tioce *ce_mmr; | |
331 | unsigned long flags; | |
332 | ||
333 | bus_addr = tioce_dma_barrier(bus_addr, 0); | |
334 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | |
335 | ||
336 | /* nothing to do for D64 */ | |
337 | ||
338 | if (TIOCE_D64_ADDR(bus_addr)) | |
339 | return; | |
340 | ||
341 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | |
342 | ||
343 | if (TIOCE_D32_ADDR(bus_addr)) { | |
344 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { | |
345 | ce_kern->ce_port[port].dirmap_shadow = 0; | |
346 | ce_mmr->ce_ure_dir_map[port] = 0; | |
347 | } | |
348 | } else { | |
349 | struct tioce_dmamap *map; | |
350 | ||
351 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, | |
352 | ce_dmamap_list) { | |
353 | uint64_t last; | |
354 | ||
355 | last = map->pci_start + map->nbytes - 1; | |
356 | if (bus_addr >= map->pci_start && bus_addr <= last) | |
357 | break; | |
358 | } | |
359 | ||
360 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { | |
361 | printk(KERN_WARNING | |
362 | "%s: %s - no map found for bus_addr 0x%lx\n", | |
363 | __FUNCTION__, pci_name(pdev), bus_addr); | |
364 | } else if (--map->refcnt == 0) { | |
365 | for (i = 0; i < map->ate_count; i++) { | |
366 | map->ate_shadow[i] = 0; | |
367 | map->ate_hw[i] = 0; | |
368 | } | |
369 | ||
370 | list_del(&map->ce_dmamap_list); | |
371 | kfree(map); | |
372 | } | |
373 | } | |
374 | ||
375 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | |
376 | } | |
377 | ||
378 | /** | |
379 | * tioce_do_dma_map - map pages for PCI DMA | |
380 | * @pdev: linux pci_dev representing the function | |
381 | * @paddr: host physical address to map | |
382 | * @byte_count: bytes to map | |
383 | * | |
384 | * This is the main wrapper for mapping host physical pages to CE PCI space. | |
385 | * The mapping mode used is based on the device's dma_mask. | |
386 | */ | |
387 | static uint64_t | |
388 | tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, | |
389 | int barrier) | |
390 | { | |
391 | unsigned long flags; | |
392 | uint64_t ct_addr; | |
393 | uint64_t mapaddr = 0; | |
394 | struct tioce_kernel *ce_kern; | |
395 | struct tioce_dmamap *map; | |
396 | int port; | |
397 | uint64_t dma_mask; | |
398 | ||
399 | dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; | |
400 | ||
401 | /* cards must be able to address at least 31 bits */ | |
402 | if (dma_mask < 0x7fffffffUL) | |
403 | return 0; | |
404 | ||
405 | ct_addr = PHYS_TO_TIODMA(paddr); | |
406 | ||
407 | /* | |
408 | * If the device can generate 64 bit addresses, create a D64 map. | |
409 | * Since this should never fail, bypass the rest of the checks. | |
410 | */ | |
411 | if (dma_mask == ~0UL) { | |
412 | mapaddr = tioce_dma_d64(ct_addr); | |
413 | goto dma_map_done; | |
414 | } | |
415 | ||
416 | pcidev_to_tioce(pdev, NULL, &ce_kern, &port); | |
417 | ||
418 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | |
419 | ||
420 | /* | |
421 | * D64 didn't work ... See if we have an existing map that covers | |
422 | * this address range. Must account for devices dma_mask here since | |
423 | * an existing map might have been done in a mode using more pci | |
424 | * address bits than this device can support. | |
425 | */ | |
426 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { | |
427 | uint64_t last; | |
428 | ||
429 | last = map->ct_start + map->nbytes - 1; | |
430 | if (ct_addr >= map->ct_start && | |
431 | ct_addr + byte_count - 1 <= last && | |
432 | map->pci_start <= dma_mask) { | |
433 | map->refcnt++; | |
434 | mapaddr = map->pci_start + (ct_addr - map->ct_start); | |
435 | break; | |
436 | } | |
437 | } | |
438 | ||
439 | /* | |
440 | * If we don't have a map yet, and the card can generate 40 | |
441 | * bit addresses, try the M40/M40S modes. Note these modes do not | |
442 | * support a barrier bit, so if we need a consistent map these | |
443 | * won't work. | |
444 | */ | |
445 | if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { | |
446 | /* | |
447 | * We have two options for 40-bit mappings: 16GB "super" ATE's | |
448 | * and 64MB "regular" ATE's. We'll try both if needed for a | |
449 | * given mapping but which one we try first depends on the | |
450 | * size. For requests >64MB, prefer to use a super page with | |
451 | * regular as the fallback. Otherwise, try in the reverse order. | |
452 | */ | |
453 | ||
454 | if (byte_count > MB(64)) { | |
455 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | |
456 | port, ct_addr, byte_count); | |
457 | if (!mapaddr) | |
458 | mapaddr = | |
459 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | |
460 | ct_addr, byte_count); | |
461 | } else { | |
462 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | |
463 | ct_addr, byte_count); | |
464 | if (!mapaddr) | |
465 | mapaddr = | |
466 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | |
467 | port, ct_addr, byte_count); | |
468 | } | |
469 | } | |
470 | ||
471 | /* | |
472 | * 32-bit direct is the next mode to try | |
473 | */ | |
474 | if (!mapaddr && dma_mask >= 0xffffffffUL) | |
475 | mapaddr = tioce_dma_d32(pdev, ct_addr); | |
476 | ||
477 | /* | |
478 | * Last resort, try 32-bit ATE-based map. | |
479 | */ | |
480 | if (!mapaddr) | |
481 | mapaddr = | |
482 | tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, | |
483 | byte_count); | |
484 | ||
485 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | |
486 | ||
487 | dma_map_done: | |
488 | if (mapaddr & barrier) | |
489 | mapaddr = tioce_dma_barrier(mapaddr, 1); | |
490 | ||
491 | return mapaddr; | |
492 | } | |
493 | ||
494 | /** | |
495 | * tioce_dma - standard pci dma map interface | |
496 | * @pdev: pci device requesting the map | |
497 | * @paddr: system physical address to map into pci space | |
498 | * @byte_count: # bytes to map | |
499 | * | |
500 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear | |
501 | * in the address. | |
502 | */ | |
503 | static uint64_t | |
504 | tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | |
505 | { | |
506 | return tioce_do_dma_map(pdev, paddr, byte_count, 0); | |
507 | } | |
508 | ||
509 | /** | |
510 | * tioce_dma_consistent - consistent pci dma map interface | |
511 | * @pdev: pci device requesting the map | |
512 | * @paddr: system physical address to map into pci space | |
513 | * @byte_count: # bytes to map | |
514 | * | |
515 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | |
516 | * in the address. | |
517 | */ static uint64_t | |
518 | tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | |
519 | { | |
520 | return tioce_do_dma_map(pdev, paddr, byte_count, 1); | |
521 | } | |
522 | ||
523 | /** | |
524 | * tioce_error_intr_handler - SGI TIO CE error interrupt handler | |
525 | * @irq: unused | |
526 | * @arg: pointer to tioce_common struct for the given CE | |
527 | * @pt: unused | |
528 | * | |
529 | * Handle a CE error interrupt. Simply a wrapper around a SAL call which | |
530 | * defers processing to the SGI prom. | |
531 | */ static irqreturn_t | |
532 | tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | |
533 | { | |
534 | struct tioce_common *soft = arg; | |
535 | struct ia64_sal_retval ret_stuff; | |
536 | ret_stuff.status = 0; | |
537 | ret_stuff.v0 = 0; | |
538 | ||
539 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | |
540 | soft->ce_pcibus.bs_persist_segment, | |
541 | soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0); | |
542 | ||
543 | return IRQ_HANDLED; | |
544 | } | |
545 | ||
546 | /** | |
547 | * tioce_kern_init - init kernel structures related to a given TIOCE | |
548 | * @tioce_common: ptr to a cached tioce_common struct that originated in prom | |
549 | */ static struct tioce_kernel * | |
550 | tioce_kern_init(struct tioce_common *tioce_common) | |
551 | { | |
552 | int i; | |
553 | uint32_t tmp; | |
554 | struct tioce *tioce_mmr; | |
555 | struct tioce_kernel *tioce_kern; | |
556 | ||
557 | tioce_kern = kcalloc(1, sizeof(struct tioce_kernel), GFP_KERNEL); | |
558 | if (!tioce_kern) { | |
559 | return NULL; | |
560 | } | |
561 | ||
562 | tioce_kern->ce_common = tioce_common; | |
563 | spin_lock_init(&tioce_kern->ce_lock); | |
564 | INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); | |
565 | tioce_common->ce_kernel_private = (uint64_t) tioce_kern; | |
566 | ||
567 | /* | |
568 | * Determine the secondary bus number of the port2 logical PPB. | |
569 | * This is used to decide whether a given pci device resides on | |
570 | * port1 or port2. Note: We don't have enough plumbing set up | |
571 | * here to use pci_read_config_xxx() so use the raw_pci_ops vector. | |
572 | */ | |
573 | ||
574 | raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, | |
575 | tioce_common->ce_pcibus.bs_persist_busnum, | |
576 | PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); | |
577 | tioce_kern->ce_port1_secondary = (uint8_t) tmp; | |
578 | ||
579 | /* | |
580 | * Set PMU pagesize to the largest size available, and zero out | |
581 | * the ate's. | |
582 | */ | |
583 | ||
584 | tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; | |
585 | tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK; | |
586 | tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE; | |
587 | tioce_kern->ce_ate3240_pagesize = KB(256); | |
588 | ||
589 | for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { | |
590 | tioce_kern->ce_ate40_shadow[i] = 0; | |
591 | tioce_mmr->ce_ure_ate40[i] = 0; | |
592 | } | |
593 | ||
594 | for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { | |
595 | tioce_kern->ce_ate3240_shadow[i] = 0; | |
596 | tioce_mmr->ce_ure_ate3240[i] = 0; | |
597 | } | |
598 | ||
599 | return tioce_kern; | |
600 | } | |
601 | ||
602 | /** | |
603 | * tioce_force_interrupt - implement altix force_interrupt() backend for CE | |
604 | * @sn_irq_info: sn asic irq that we need an interrupt generated for | |
605 | * | |
606 | * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to | |
607 | * force a secondary interrupt to be generated. This is to work around an | |
608 | * asic issue where there is a small window of opportunity for a legacy device | |
609 | * interrupt to be lost. | |
610 | */ | |
611 | static void | |
612 | tioce_force_interrupt(struct sn_irq_info *sn_irq_info) | |
613 | { | |
614 | struct pcidev_info *pcidev_info; | |
615 | struct tioce_common *ce_common; | |
616 | struct tioce *ce_mmr; | |
617 | uint64_t force_int_val; | |
618 | ||
619 | if (!sn_irq_info->irq_bridge) | |
620 | return; | |
621 | ||
622 | if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE) | |
623 | return; | |
624 | ||
625 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | |
626 | if (!pcidev_info) | |
627 | return; | |
628 | ||
629 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | |
630 | ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; | |
631 | ||
632 | /* | |
633 | * irq_int_bit is originally set up by prom, and holds the interrupt | |
634 | * bit shift (not mask) as defined by the bit definitions in the | |
635 | * ce_adm_int mmr. These shifts are not the same for the | |
636 | * ce_adm_force_int register, so do an explicit mapping here to make | |
637 | * things clearer. | |
638 | */ | |
639 | ||
640 | switch (sn_irq_info->irq_int_bit) { | |
641 | case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT: | |
642 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT; | |
643 | break; | |
644 | case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT: | |
645 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT; | |
646 | break; | |
647 | case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT: | |
648 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT; | |
649 | break; | |
650 | case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT: | |
651 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT; | |
652 | break; | |
653 | case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT: | |
654 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT; | |
655 | break; | |
656 | case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT: | |
657 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT; | |
658 | break; | |
659 | case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT: | |
660 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT; | |
661 | break; | |
662 | case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT: | |
663 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT; | |
664 | break; | |
665 | default: | |
666 | return; | |
667 | } | |
668 | ce_mmr->ce_adm_force_int = force_int_val; | |
669 | } | |
670 | ||
671 | /** |
672 | * tioce_target_interrupt - implement set_irq_affinity for tioce resident | |
673 | * functions. Note: only applies to line interrupts, not MSI's. | |
674 | * | |
675 | * @sn_irq_info: SN IRQ context | |
676 | * | |
677 | * Given an sn_irq_info, set the associated CE device's interrupt destination | |
678 | * register. Since the interrupt destination registers are on a per-ce-slot | |
679 | * basis, this will retarget line interrupts for all functions downstream of | |
680 | * the slot. | |
681 | */ | |
682 | static void | |
683 | tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | |
684 | { | |
685 | struct pcidev_info *pcidev_info; | |
686 | struct tioce_common *ce_common; | |
687 | struct tioce *ce_mmr; | |
688 | int bit; | |
689 | ||
690 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | |
691 | if (!pcidev_info) | |
692 | return; | |
693 | ||
694 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | |
695 | ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; | |
696 | ||
697 | bit = sn_irq_info->irq_int_bit; | |
698 | ||
699 | ce_mmr->ce_adm_int_mask |= (1UL << bit); | |
700 | ce_mmr->ce_adm_int_dest[bit] = | |
701 | ((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) | | |
702 | sn_irq_info->irq_xtalkaddr; | |
703 | ce_mmr->ce_adm_int_mask &= ~(1UL << bit); | |
704 | ||
705 | tioce_force_interrupt(sn_irq_info); | |
706 | } | |
707 | ||
708 | /** |
709 | * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus | |
710 | * @prom_bussoft: Common prom/kernel struct representing the bus | |
711 | * | |
712 | * Replicates the tioce_common pointed to by @prom_bussoft in kernel | |
713 | * space. Allocates and initializes a kernel-only area for a given CE, | |
714 | * and sets up an irq for handling CE error interrupts. | |
715 | * | |
716 | * On successful setup, returns the kernel version of tioce_common back to | |
717 | * the caller. | |
718 | */ | |
719 | static void * | |
720 | tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | |
721 | { | |
722 | struct tioce_common *tioce_common; | |
723 | ||
724 | /* | |
725 | * Allocate kernel bus soft and copy from prom. | |
726 | */ | |
727 | ||
728 | tioce_common = kcalloc(1, sizeof(struct tioce_common), GFP_KERNEL); | |
729 | if (!tioce_common) | |
730 | return NULL; | |
731 | ||
732 | memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); | |
733 | tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET; | |
734 | ||
735 | if (tioce_kern_init(tioce_common) == NULL) { | |
736 | kfree(tioce_common); | |
737 | return NULL; | |
738 | } | |
739 | ||
740 | if (request_irq(SGI_PCIASIC_ERROR, | |
741 | tioce_error_intr_handler, | |
742 | SA_SHIRQ, "TIOCE error", (void *)tioce_common)) | |
743 | printk(KERN_WARNING | |
744 | "%s: Unable to get irq %d. " | |
745 | "Error interrupts won't be routed for " | |
746 | "TIOCE bus %04x:%02x\n", | |
747 | __FUNCTION__, SGI_PCIASIC_ERROR, | |
748 | tioce_common->ce_pcibus.bs_persist_segment, | |
749 | tioce_common->ce_pcibus.bs_persist_busnum); | |
750 | ||
751 | return tioce_common; | |
752 | } | |
753 | ||
754 | static struct sn_pcibus_provider tioce_pci_interfaces = { | |
755 | .dma_map = tioce_dma, | |
756 | .dma_map_consistent = tioce_dma_consistent, | |
757 | .dma_unmap = tioce_dma_unmap, | |
758 | .bus_fixup = tioce_bus_fixup, | |
8409668b MM |
759 | .force_interrupt = tioce_force_interrupt, |
760 | .target_interrupt = tioce_target_interrupt | |
c9221da9 MM |
761 | }; |
762 | ||
763 | /** | |
764 | * tioce_init_provider - init SN PCI provider ops for TIO CE | |
765 | */ | |
766 | int | |
767 | tioce_init_provider(void) | |
768 | { | |
769 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces; | |
770 | return 0; | |
771 | } |