]>
Commit | Line | Data |
---|---|---|
c9221da9 MM |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
f640f94e | 6 | * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. |
c9221da9 MM |
7 | */ |
8 | ||
9 | #include <linux/types.h> | |
10 | #include <linux/interrupt.h> | |
11 | #include <linux/pci.h> | |
12 | #include <asm/sn/sn_sal.h> | |
13 | #include <asm/sn/addrs.h> | |
1fa92957 | 14 | #include <asm/sn/io.h> |
c9221da9 MM |
15 | #include <asm/sn/pcidev.h> |
16 | #include <asm/sn/pcibus_provider_defs.h> | |
17 | #include <asm/sn/tioce_provider.h> | |
13938ca7 MM |
18 | #include <asm/sn/sn2/sn_hwperf.h> |
19 | ||
20 | /* | |
21 | * 1/26/2006 | |
22 | * | |
23 | * WAR for SGI PV 944642. For revA TIOCE, need to use the following recipe | |
24 | * (taken from the above PV) before and after accessing tioce internal MMR's | |
25 | * to avoid tioce lockups. | |
26 | * | |
27 | * The recipe as taken from the PV: | |
28 | * | |
29 | * if(mmr address < 0x45000) { | |
30 | * if(mmr address == 0 or 0x80) | |
31 | * mmr wrt or read address 0xc0 | |
32 | * else if(mmr address == 0x148 or 0x200) | |
33 | * mmr wrt or read address 0x28 | |
34 | * else | |
35 | * mmr wrt or read address 0x158 | |
36 | * | |
37 | * do desired mmr access (rd or wrt) | |
38 | * | |
39 | * if(mmr address == 0x100) | |
40 | * mmr wrt or read address 0x38 | |
41 | * mmr wrt or read address 0xb050 | |
42 | * } else | |
43 | * do desired mmr access | |
44 | * | |
 * According to hw, we can use reads instead of writes to the above addresses
46 | * | |
47 | * Note this WAR can only to be used for accessing internal MMR's in the | |
48 | * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the | |
49 | * "Local CE Registers and Memories" and "PCI Compatible Config Space" address | |
50 | * spaces from table 2-1 of the "CE Programmer's Reference Overview" document. | |
51 | * | |
52 | * All registers defined in struct tioce will meet that criteria. | |
53 | */ | |
54 | ||
55 | static void inline | |
56 | tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr) | |
57 | { | |
58 | u64 mmr_base; | |
59 | u64 mmr_offset; | |
60 | ||
61 | if (kern->ce_common->ce_rev != TIOCE_REV_A) | |
62 | return; | |
63 | ||
64 | mmr_base = kern->ce_common->ce_pcibus.bs_base; | |
65 | mmr_offset = (u64)mmr_addr - mmr_base; | |
66 | ||
67 | if (mmr_offset < 0x45000) { | |
68 | u64 mmr_war_offset; | |
69 | ||
70 | if (mmr_offset == 0 || mmr_offset == 0x80) | |
71 | mmr_war_offset = 0xc0; | |
72 | else if (mmr_offset == 0x148 || mmr_offset == 0x200) | |
73 | mmr_war_offset = 0x28; | |
74 | else | |
75 | mmr_war_offset = 0x158; | |
76 | ||
e037cda5 | 77 | readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset)); |
13938ca7 MM |
78 | } |
79 | } | |
80 | ||
81 | static void inline | |
82 | tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr) | |
83 | { | |
84 | u64 mmr_base; | |
85 | u64 mmr_offset; | |
86 | ||
87 | if (kern->ce_common->ce_rev != TIOCE_REV_A) | |
88 | return; | |
89 | ||
90 | mmr_base = kern->ce_common->ce_pcibus.bs_base; | |
91 | mmr_offset = (u64)mmr_addr - mmr_base; | |
92 | ||
93 | if (mmr_offset < 0x45000) { | |
94 | if (mmr_offset == 0x100) | |
e037cda5 KO |
95 | readq_relaxed((void __iomem *)(mmr_base + 0x38)); |
96 | readq_relaxed((void __iomem *)(mmr_base + 0xb050)); | |
13938ca7 MM |
97 | } |
98 | } | |
99 | ||
/*
 * MMR accessor macros.  All accesses to tioce internal MMR's must be
 * bracketed by the rev A workaround above, so every load/store goes
 * through these helpers.
 *
 * Fixes: parenthesize the dereferenced macro argument in tioce_mmr_store
 * (macro hygiene), and give the seti/clri scratch variable a name that
 * cannot capture a caller variable passed in as @bits (the old name
 * "tmp" would silently alias a caller's "tmp").
 */

/* load mmr contents into a variable */
#define tioce_mmr_load(kern, mmrp, varp) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	*(varp) = readq_relaxed(mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* store variable contents into mmr */
#define tioce_mmr_store(kern, mmrp, varp) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(*(varp), mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* store immediate value into mmr */
#define tioce_mmr_storei(kern, mmrp, val) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(val, mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* set bits (immediate value) into mmr */
#define tioce_mmr_seti(kern, mmrp, bits) do { \
	u64 mmr_tmp_; \
	tioce_mmr_load(kern, mmrp, &mmr_tmp_); \
	mmr_tmp_ |= (bits); \
	tioce_mmr_store(kern, mmrp, &mmr_tmp_); \
} while (0)

/* clear bits (immediate value) from mmr */
#define tioce_mmr_clri(kern, mmrp, bits) do { \
	u64 mmr_tmp_; \
	tioce_mmr_load(kern, mmrp, &mmr_tmp_); \
	mmr_tmp_ &= ~(bits); \
	tioce_mmr_store(kern, mmrp, &mmr_tmp_); \
} while (0)
c9221da9 MM |
136 | |
/**
 * Bus address ranges for the 5 flavors of TIOCE DMA
 */

/* 64-bit direct: top bit set, whole upper half of bus space */
#define TIOCE_D64_MIN	0x8000000000000000UL
#define TIOCE_D64_MAX	0xffffffffffffffffUL
#define TIOCE_D64_ADDR(a)	((a) >= TIOCE_D64_MIN)

/* 32-bit direct: upper 2GB of the low 4GB */
#define TIOCE_D32_MIN	0x0000000080000000UL
#define TIOCE_D32_MAX	0x00000000ffffffffUL
#define TIOCE_D32_ADDR(a)	((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX)

/* 32-bit mapped (ATE): lower 2GB */
#define TIOCE_M32_MIN	0x0000000000000000UL
#define TIOCE_M32_MAX	0x000000007fffffffUL
#define TIOCE_M32_ADDR(a)	((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX)

/* 40-bit mapped (64MB ATE pages) */
#define TIOCE_M40_MIN	0x0000004000000000UL
#define TIOCE_M40_MAX	0x0000007fffffffffUL
#define TIOCE_M40_ADDR(a)	((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX)

/* 40-bit mapped super-page (16GB ATE pages) */
#define TIOCE_M40S_MIN	0x0000008000000000UL
#define TIOCE_M40S_MAX	0x000000ffffffffffUL
#define TIOCE_M40S_ADDR(a)	((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX)

/*
 * ATE manipulation macros.
 */

/* page shift / low-bits mask for pagesize @ps (@ps must be a power of 2) */
#define ATE_PAGESHIFT(ps)	(__ffs(ps))
#define ATE_PAGEMASK(ps)	((ps)-1)

/* page number of address @x, and page count covering [start, start+len) */
#define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps))
#define ATE_NPAGES(start, len, pagesize) \
	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)

/* ATE layout: bit 63 = valid, bit 62 = MSI attribute, rest = page address */
#define ATE_VALID(ate)	((ate) & (1UL << 63))
#define ATE_MAKE(addr, ps, msi) \
	(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))

/*
 * Flavors of ate-based mapping supported by tioce_alloc_map()
 */

#define TIOCE_ATE_M32	1
#define TIOCE_ATE_M40	2
#define TIOCE_ATE_M40S	3

/* byte-size helpers, promoted to u64 so shifts cannot overflow int */
#define KB(x)	((u64)(x) << 10)
#define MB(x)	((u64)(x) << 20)
#define GB(x)	((u64)(x) << 30)
c9221da9 MM |
187 | |
188 | /** | |
189 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode | |
190 | * @ct_addr: system coretalk address | |
191 | * | |
192 | * Map @ct_addr into 64-bit CE bus space. No device context is necessary | |
193 | * and no CE mapping are consumed. | |
194 | * | |
195 | * Bits 53:0 come from the coretalk address. The remaining bits are set as | |
196 | * follows: | |
197 | * | |
198 | * 63 - must be 1 to indicate d64 mode to CE hardware | |
199 | * 62 - barrier bit ... controlled with tioce_dma_barrier() | |
83821d3f | 200 | * 61 - msi bit ... specified through dma_flags |
c9221da9 MM |
201 | * 60:54 - reserved, MBZ |
202 | */ | |
53493dcf | 203 | static u64 |
83821d3f | 204 | tioce_dma_d64(unsigned long ct_addr, int dma_flags) |
c9221da9 | 205 | { |
53493dcf | 206 | u64 bus_addr; |
c9221da9 MM |
207 | |
208 | bus_addr = ct_addr | (1UL << 63); | |
83821d3f MM |
209 | if (dma_flags & SN_DMA_MSI) |
210 | bus_addr |= (1UL << 61); | |
c9221da9 MM |
211 | |
212 | return bus_addr; | |
213 | } | |
214 | ||
215 | /** | |
216 | * pcidev_to_tioce - return misc ce related pointers given a pci_dev | |
217 | * @pci_dev: pci device context | |
218 | * @base: ptr to store struct tioce_mmr * for the CE holding this device | |
219 | * @kernel: ptr to store struct tioce_kernel * for the CE holding this device | |
220 | * @port: ptr to store the CE port number that this device is on | |
221 | * | |
222 | * Return pointers to various CE-related structures for the CE upstream of | |
223 | * @pci_dev. | |
224 | */ | |
225 | static inline void | |
226 | pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, | |
227 | struct tioce_kernel **kernel, int *port) | |
228 | { | |
229 | struct pcidev_info *pcidev_info; | |
230 | struct tioce_common *ce_common; | |
231 | struct tioce_kernel *ce_kernel; | |
232 | ||
233 | pcidev_info = SN_PCIDEV_INFO(pdev); | |
234 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | |
235 | ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private; | |
236 | ||
237 | if (base) | |
238 | *base = (struct tioce *)ce_common->ce_pcibus.bs_base; | |
239 | if (kernel) | |
240 | *kernel = ce_kernel; | |
241 | ||
242 | /* | |
243 | * we use port as a zero-based value internally, even though the | |
244 | * documentation is 1-based. | |
245 | */ | |
246 | if (port) | |
247 | *port = | |
248 | (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1; | |
249 | } | |
250 | ||
/**
 * tioce_alloc_map - Given a coretalk address, map it to pcie bus address
 * space using one of the various ATE-based address modes.
 * @ce_kern: tioce context
 * @type: map mode to use (TIOCE_ATE_M32, TIOCE_ATE_M40 or TIOCE_ATE_M40S)
 * @port: 0-based port that the requesting device is downstream of
 * @ct_addr: the coretalk address to map
 * @len: number of bytes to map
 * @dma_flags: SN DMA flags; SN_DMA_MSI requests an MSI-capable mapping
 *
 * Given the addressing type, set up various parameters that define the
 * ATE pool to use.  Search for a contiguous block of entries to cover the
 * length, and if enough resources exist, fill in the ATE's and construct a
 * tioce_dmamap struct to track the mapping.
 *
 * Returns the bus address for @ct_addr, or 0 on failure (bad type, MSI
 * requested on a non-MSI-capable pool, no free ATE run, or allocation
 * failure).  Caller holds ce_kern->ce_lock.
 */
static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
		u64 ct_addr, int len, int dma_flags)
{
	int i;
	int j;
	int first;
	int last;
	int entries;
	int nates;
	u64 pagesize;
	int msi_capable, msi_wanted;
	u64 *ate_shadow;
	u64 *ate_reg;
	u64 addr;
	struct tioce *ce_mmr;
	u64 bus_base;
	struct tioce_dmamap *map;

	ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;

	/* select the ATE pool, page size and bus window for this mode */
	switch (type) {
	case TIOCE_ATE_M32:
		/*
		 * The first 64 entries of the ate3240 pool are dedicated to
		 * super-page (TIOCE_ATE_M40S) mode.
		 */
		first = 64;
		entries = TIOCE_NUM_M3240_ATES - 64;
		ate_shadow = ce_kern->ce_ate3240_shadow;
		ate_reg = ce_mmr->ce_ure_ate3240;
		pagesize = ce_kern->ce_ate3240_pagesize;
		bus_base = TIOCE_M32_MIN;
		msi_capable = 1;
		break;
	case TIOCE_ATE_M40:
		first = 0;
		entries = TIOCE_NUM_M40_ATES;
		ate_shadow = ce_kern->ce_ate40_shadow;
		ate_reg = ce_mmr->ce_ure_ate40;
		pagesize = MB(64);
		bus_base = TIOCE_M40_MIN;
		msi_capable = 0;
		break;
	case TIOCE_ATE_M40S:
		/*
		 * ate3240 entries 0-31 are dedicated to port1 super-page
		 * mappings.  ate3240 entries 32-63 are dedicated to port2.
		 */
		first = port * 32;
		entries = 32;
		ate_shadow = ce_kern->ce_ate3240_shadow;
		ate_reg = ce_mmr->ce_ure_ate3240;
		pagesize = GB(16);
		bus_base = TIOCE_M40S_MIN;
		msi_capable = 0;
		break;
	default:
		return 0;
	}

	msi_wanted = dma_flags & SN_DMA_MSI;
	if (msi_wanted && !msi_capable)
		return 0;

	nates = ATE_NPAGES(ct_addr, len, pagesize);
	if (nates > entries)
		return 0;

	/*
	 * First-fit search of the shadow array for a run of nates
	 * consecutive invalid (free) entries.
	 */
	last = first + entries - nates;
	for (i = first; i <= last; i++) {
		if (ATE_VALID(ate_shadow[i]))
			continue;

		for (j = i; j < i + nates; j++)
			if (ATE_VALID(ate_shadow[j]))
				break;

		/* inner loop completed => run [i, i+nates) is free */
		if (j >= i + nates)
			break;
	}

	if (i > last)
		return 0;

	/* GFP_ATOMIC: caller holds ce_kern->ce_lock with irqs disabled */
	map = kzalloc(sizeof(struct tioce_dmamap), GFP_ATOMIC);
	if (!map)
		return 0;

	/* program the ATE's, keeping the shadow copy in sync */
	addr = ct_addr;
	for (j = 0; j < nates; j++) {
		u64 ate;

		ate = ATE_MAKE(addr, pagesize, msi_wanted);
		ate_shadow[i + j] = ate;
		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
		addr += pagesize;
	}

	map->refcnt = 1;
	map->nbytes = nates * pagesize;
	map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize);
	map->pci_start = bus_base + (i * pagesize);
	map->ate_hw = &ate_reg[i];
	map->ate_shadow = &ate_shadow[i];
	map->ate_count = nates;

	list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list);

	/* bus address = start of window + offset of ct_addr within it */
	return (map->pci_start + (ct_addr - map->ct_start));
}
376 | ||
/**
 * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @ct_addr: system coretalk address
 * @dma_flags: SN DMA flags; SN_DMA_MSI is rejected (d32 is not MSI-capable)
 *
 * Map @ct_addr into 32-bit bus space of the CE associated with @pdev.
 * The d32 window is a single shared per-port mapping of a 1GB-aligned
 * region; it succeeds only if the window is free or already points at
 * the same upper bits.  Returns 0 on failure.
 */
static u64
tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
{
	int dma_ok;
	int port;
	struct tioce *ce_mmr;
	struct tioce_kernel *ce_kern;
	u64 ct_upper;
	u64 ct_lower;
	dma_addr_t bus_addr;

	/* d32 mode cannot provide MSI mappings */
	if (dma_flags & SN_DMA_MSI)
		return 0;

	/* split at the 1GB (2^30) boundary the dirmap window covers */
	ct_upper = ct_addr & ~0x3fffffffUL;
	ct_lower = ct_addr & 0x3fffffffUL;

	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);

	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
		u64 tmp;

		/* window free: claim it for ct_upper */
		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
				 ct_upper);
		/*
		 * NOTE(review): readback of the just-written MMR —
		 * presumably to flush/post the write; confirm against the
		 * CE programming reference.
		 */
		tmp = ce_mmr->ce_ure_dir_map[port];
		dma_ok = 1;
	} else
		/* window busy: only OK if it already maps the same region */
		dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper);

	if (dma_ok) {
		ce_kern->ce_port[port].dirmap_refcnt++;
		bus_addr = TIOCE_D32_MIN + ct_lower;
	} else
		bus_addr = 0;

	return bus_addr;
}
422 | ||
423 | /** | |
424 | * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude | |
425 | * the barrier bit. | |
426 | * @bus_addr: bus address to swizzle | |
427 | * | |
428 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier | |
429 | * attributes. | |
430 | */ | |
53493dcf PB |
431 | static u64 |
432 | tioce_dma_barrier(u64 bus_addr, int on) | |
c9221da9 | 433 | { |
53493dcf | 434 | u64 barrier_bit; |
c9221da9 MM |
435 | |
436 | /* barrier not supported in M40/M40S mode */ | |
437 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) | |
438 | return bus_addr; | |
439 | ||
440 | if (TIOCE_D64_ADDR(bus_addr)) | |
441 | barrier_bit = (1UL << 62); | |
442 | else /* must be m32 or d32 */ | |
443 | barrier_bit = (1UL << 30); | |
444 | ||
445 | return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit); | |
446 | } | |
447 | ||
448 | /** | |
449 | * tioce_dma_unmap - release CE mapping resources | |
450 | * @pdev: linux pci_dev representing the function | |
451 | * @bus_addr: bus address returned by an earlier tioce_dma_map | |
452 | * @dir: mapping direction (unused) | |
453 | * | |
454 | * Locate mapping resources associated with @bus_addr and release them. | |
455 | * For mappings created using the direct modes there are no resources | |
456 | * to release. | |
457 | */ | |
458 | void | |
459 | tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |
460 | { | |
461 | int i; | |
462 | int port; | |
463 | struct tioce_kernel *ce_kern; | |
464 | struct tioce *ce_mmr; | |
465 | unsigned long flags; | |
466 | ||
467 | bus_addr = tioce_dma_barrier(bus_addr, 0); | |
468 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | |
469 | ||
470 | /* nothing to do for D64 */ | |
471 | ||
472 | if (TIOCE_D64_ADDR(bus_addr)) | |
473 | return; | |
474 | ||
475 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | |
476 | ||
477 | if (TIOCE_D32_ADDR(bus_addr)) { | |
478 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { | |
479 | ce_kern->ce_port[port].dirmap_shadow = 0; | |
13938ca7 MM |
480 | tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port], |
481 | 0); | |
c9221da9 MM |
482 | } |
483 | } else { | |
484 | struct tioce_dmamap *map; | |
485 | ||
486 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, | |
487 | ce_dmamap_list) { | |
53493dcf | 488 | u64 last; |
c9221da9 MM |
489 | |
490 | last = map->pci_start + map->nbytes - 1; | |
491 | if (bus_addr >= map->pci_start && bus_addr <= last) | |
492 | break; | |
493 | } | |
494 | ||
495 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { | |
496 | printk(KERN_WARNING | |
497 | "%s: %s - no map found for bus_addr 0x%lx\n", | |
498 | __FUNCTION__, pci_name(pdev), bus_addr); | |
499 | } else if (--map->refcnt == 0) { | |
500 | for (i = 0; i < map->ate_count; i++) { | |
501 | map->ate_shadow[i] = 0; | |
13938ca7 | 502 | tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0); |
c9221da9 MM |
503 | } |
504 | ||
505 | list_del(&map->ce_dmamap_list); | |
506 | kfree(map); | |
507 | } | |
508 | } | |
509 | ||
510 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | |
511 | } | |
512 | ||
/**
 * tioce_do_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 * @barrier: nonzero to request a consistent (barrier) mapping
 * @dma_flags: SN DMA flags (address type, MSI)
 *
 * This is the main wrapper for mapping host physical pages to CE PCI space.
 * The mapping mode used is based on the device's dma_mask.  Modes are
 * attempted in order of decreasing address width: D64, an existing ATE
 * map covering the range, M40/M40S, D32, and finally a fresh M32 ATE map.
 * Returns the bus address, or 0 if no mode succeeded.
 */
static u64
tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
		 int barrier, int dma_flags)
{
	unsigned long flags;
	u64 ct_addr;
	u64 mapaddr = 0;
	struct tioce_kernel *ce_kern;
	struct tioce_dmamap *map;
	int port;
	u64 dma_mask;

	dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;

	/* cards must be able to address at least 31 bits */
	if (dma_mask < 0x7fffffffUL)
		return 0;

	/* translate a raw physical address into a coretalk address */
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		ct_addr = PHYS_TO_TIODMA(paddr);
	else
		ct_addr = paddr;

	/*
	 * If the device can generate 64 bit addresses, create a D64 map.
	 */
	if (dma_mask == ~0UL) {
		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
		if (mapaddr)
			goto dma_map_done;
	}

	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);

	spin_lock_irqsave(&ce_kern->ce_lock, flags);

	/*
	 * D64 didn't work ... See if we have an existing map that covers
	 * this address range.  Must account for devices dma_mask here since
	 * an existing map might have been done in a mode using more pci
	 * address bits than this device can support.
	 */
	list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
		u64 last;

		last = map->ct_start + map->nbytes - 1;
		if (ct_addr >= map->ct_start &&
		    ct_addr + byte_count - 1 <= last &&
		    map->pci_start <= dma_mask) {
			map->refcnt++;
			mapaddr = map->pci_start + (ct_addr - map->ct_start);
			break;
		}
	}

	/*
	 * If we don't have a map yet, and the card can generate 40
	 * bit addresses, try the M40/M40S modes.  Note these modes do not
	 * support a barrier bit, so if we need a consistent map these
	 * won't work.
	 */
	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
		/*
		 * We have two options for 40-bit mappings: 16GB "super" ATE's
		 * and 64MB "regular" ATE's.  We'll try both if needed for a
		 * given mapping but which one we try first depends on the
		 * size.  For requests >64MB, prefer to use a super page with
		 * regular as the fallback.  Otherwise, try in the reverse
		 * order.
		 */

		if (byte_count > MB(64)) {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						  port, ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						    ct_addr, byte_count,
						    dma_flags);
		} else {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						  ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						    port, ct_addr, byte_count,
						    dma_flags);
		}
	}

	/*
	 * 32-bit direct is the next mode to try
	 */
	if (!mapaddr && dma_mask >= 0xffffffffUL)
		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);

	/*
	 * Last resort, try 32-bit ATE-based map.
	 */
	if (!mapaddr)
		mapaddr =
		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
				    byte_count, dma_flags);

	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);

dma_map_done:
	/* tag the result with the barrier attribute if requested */
	if (mapaddr && barrier)
		mapaddr = tioce_dma_barrier(mapaddr, 1);

	return mapaddr;
}
635 | ||
636 | /** | |
637 | * tioce_dma - standard pci dma map interface | |
638 | * @pdev: pci device requesting the map | |
639 | * @paddr: system physical address to map into pci space | |
640 | * @byte_count: # bytes to map | |
641 | * | |
642 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear | |
643 | * in the address. | |
644 | */ | |
53493dcf | 645 | static u64 |
83821d3f | 646 | tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) |
c9221da9 | 647 | { |
83821d3f | 648 | return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags); |
c9221da9 MM |
649 | } |
650 | ||
651 | /** | |
652 | * tioce_dma_consistent - consistent pci dma map interface | |
653 | * @pdev: pci device requesting the map | |
654 | * @paddr: system physical address to map into pci space | |
655 | * @byte_count: # bytes to map | |
656 | * | |
657 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | |
658 | * in the address. | |
53493dcf | 659 | */ static u64 |
83821d3f | 660 | tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) |
c9221da9 | 661 | { |
83821d3f | 662 | return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); |
c9221da9 MM |
663 | } |
664 | ||
/**
 * tioce_error_intr_handler - SGI TIO CE error interrupt handler
 * @irq: unused
 * @arg: pointer to tioce_common struct for the given CE
 *
 * Handle a CE error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioce_error_intr_handler(int irq, void *arg)
{
	struct tioce_common *soft = arg;
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	/* hand the error off to prom, identified by persistent seg/bus */
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			soft->ce_pcibus.bs_persist_segment,
			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);

	/* nonzero v0 from prom indicates an unrecoverable error */
	if (ret_stuff.v0)
		panic("tioce_error_intr_handler: Fatal TIOCE error");

	return IRQ_HANDLED;
}
689 | ||
13938ca7 MM |
/**
 * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
 * @ce_kern: TIOCE context to reserve ate's for
 * @base: starting bus address to reserve
 * @limit: last bus address to reserve
 *
 * If base/limit falls within the range of bus space mapped through the
 * M32 space, reserve the resources corresponding to the range.  The
 * reserved entries are pointed at a dummy target (0xdeadbeef) and marked
 * valid so the allocator will skip them.
 */
static void
tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
{
	int ate_index, last_ate, ps;
	struct tioce *ce_mmr;

	ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
	ps = ce_kern->ce_ate3240_pagesize;
	ate_index = ATE_PAGE(base, ps);
	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;

	/* entries 0-63 belong to super-page mode; start no lower than 64 */
	if (ate_index < 64)
		ate_index = 64;

	/* clamp to the end of the ate3240 pool */
	if (last_ate >= TIOCE_NUM_M3240_ATES)
		last_ate = TIOCE_NUM_M3240_ATES - 1;

	while (ate_index <= last_ate) {
		u64 ate;

		/* valid ATE pointing at a harmless dummy address */
		ate = ATE_MAKE(0xdeadbeef, ps, 0);
		ce_kern->ce_ate3240_shadow[ate_index] = ate;
		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
				 ate);
		ate_index++;
	}
}
726 | ||
c9221da9 MM |
/**
 * tioce_kern_init - init kernel structures related to a given TIOCE
 * @tioce_common: ptr to a cached tioce_common struct that originated in prom
 *
 * Allocates and initializes the tioce_kernel context for a CE: records the
 * port1 secondary bus number, programs the PMU page size, clears all ATE's,
 * and reserves ATE's covering the PPB pio windows.  Returns the new context
 * or NULL on allocation failure.
 */
static struct tioce_kernel *
tioce_kern_init(struct tioce_common *tioce_common)
{
	int i;
	int ps;
	int dev;
	u32 tmp;
	unsigned int seg, bus;
	struct tioce *tioce_mmr;
	struct tioce_kernel *tioce_kern;

	tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
	if (!tioce_kern) {
		return NULL;
	}

	tioce_kern->ce_common = tioce_common;
	spin_lock_init(&tioce_kern->ce_lock);
	INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
	tioce_common->ce_kernel_private = (u64) tioce_kern;

	/*
	 * Determine the secondary bus number of the port2 logical PPB.
	 * This is used to decide whether a given pci device resides on
	 * port1 or port2.  Note:  We don't have enough plumbing set up
	 * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
	 */

	seg = tioce_common->ce_pcibus.bs_persist_segment;
	bus = tioce_common->ce_pcibus.bs_persist_busnum;

	raw_pci_ops->read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1,
			  &tmp);
	tioce_kern->ce_port1_secondary = (u8) tmp;

	/*
	 * Set PMU pagesize to the largest size available, and zero out
	 * the ate's.
	 */

	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
	tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_PAGESIZE_MASK);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_256K_PAGESIZE);
	ps = tioce_kern->ce_ate3240_pagesize = KB(256);

	/* invalidate both ATE pools, hardware and shadow copies alike */
	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
		tioce_kern->ce_ate40_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
	}

	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
		tioce_kern->ce_ate3240_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
	}

	/*
	 * Reserve ATE's corresponding to reserved address ranges.  These
	 * include:
	 *
	 * Memory space covered by each PPB mem base/limit register
	 * Memory space covered by each PPB prefetch base/limit register
	 *
	 * These bus ranges are for pio (downstream) traffic only, and so
	 * cannot be used for DMA.
	 */

	for (dev = 1; dev <= 2; dev++) {
		u64 base, limit;

		/* mem base/limit */

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_MEMORY_BASE, 2, &tmp);
		base = (u64)tmp << 16;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_MEMORY_LIMIT, 2, &tmp);
		limit = (u64)tmp << 16;
		limit |= 0xfffffUL;

		if (base < limit)
			tioce_reserve_m32(tioce_kern, base, limit);

		/*
		 * prefetch mem base/limit.  The tioce ppb's have 64-bit
		 * decoders, so read the upper portions w/o checking the
		 * attributes.
		 */

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_MEMORY_BASE, 2, &tmp);
		base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_BASE_UPPER32, 4, &tmp);
		base |= (u64)tmp << 32;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_MEMORY_LIMIT, 2, &tmp);

		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
		limit |= 0xfffffUL;

		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
				  PCI_PREF_LIMIT_UPPER32, 4, &tmp);
		limit |= (u64)tmp << 32;

		/* only reserve if the window decodes into M32 space */
		if ((base < limit) && TIOCE_M32_ADDR(base))
			tioce_reserve_m32(tioce_kern, base, limit);
	}

	return tioce_kern;
}
845 | ||
/**
 * tioce_force_interrupt - implement altix force_interrupt() backend for CE
 * @sn_irq_info: sn asic irq that we need an interrupt generated for
 *
 * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to
 * force a secondary interrupt to be generated.  This is to work around an
 * asic issue where there is a small window of opportunity for a legacy device
 * interrupt to be lost.
 */
static void
tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce *ce_mmr;
	u64 force_int_val;

	/* Nothing to do unless this irq is routed through a TIOCE bridge. */
	if (!sn_irq_info->irq_bridge)
		return;

	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

	/*
	 * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
	 * the TIO_INTx register directly (1/26/2006).  Only resend if the
	 * interrupt is actually pending in ce_adm_int_status; the write goes
	 * straight to the TIO io space derived from the irq's xtalk address
	 * rather than through ce_adm_force_int.
	 */
	if (ce_common->ce_rev == TIOCE_REV_A) {
		u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
		u64 status;

		/* tioce_mmr_load applies the rev-A MMR access war */
		tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
		if (status & int_bit_mask) {
			/* bit 8 set = "force", low bits carry the irq number */
			u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
			u64 ctalk = sn_irq_info->irq_xtalkaddr;
			u64 nasid, offset;

			nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
			offset = (ctalk & CTALK_NODE_OFFSET);
			HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
		}

		return;
	}

	/*
	 * irq_int_bit is originally set up by prom, and holds the interrupt
	 * bit shift (not mask) as defined by the bit definitions in the
	 * ce_adm_int mmr.  These shifts are not the same for the
	 * ce_adm_force_int register, so do an explicit mapping here to make
	 * things clearer.
	 */

	switch (sn_irq_info->irq_int_bit) {
	case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT;
		break;
	default:
		/* not a line interrupt we know how to force; silently ignore */
		return;
	}
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
}
938 | ||
8409668b MM |
/**
 * tioce_target_interrupt - implement set_irq_affinity for tioce resident
 * functions.  Note: only applies to line interrupts, not MSI's.
 *
 * @sn_irq_info: SN IRQ context
 *
 * Given an sn_irq_info, set the associated CE device's interrupt destination
 * register.  Since the interrupt destination registers are on a per-ce-slot
 * basis, this will retarget line interrupts for all functions downstream of
 * the slot.
 */
static void
tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce *ce_mmr;
	int bit;
	u64 vector;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

	bit = sn_irq_info->irq_int_bit;

	/*
	 * Mask the interrupt while its destination is rewritten, then
	 * unmask.  The destination vector combines the irq number (upper
	 * bits) with the target's xtalk address.  The tioce_mmr_* wrappers
	 * apply the rev-A MMR access workaround where needed.
	 */
	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
	vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
	vector |= sn_irq_info->irq_xtalkaddr;
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));

	/* resend any interrupt that may have been lost while masked */
	tioce_force_interrupt(sn_irq_info);
}
978 | ||
c9221da9 MM |
979 | /** |
980 | * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus | |
981 | * @prom_bussoft: Common prom/kernel struct representing the bus | |
982 | * | |
983 | * Replicates the tioce_common pointed to by @prom_bussoft in kernel | |
984 | * space. Allocates and initializes a kernel-only area for a given CE, | |
985 | * and sets up an irq for handling CE error interrupts. | |
986 | * | |
987 | * On successful setup, returns the kernel version of tioce_common back to | |
988 | * the caller. | |
989 | */ | |
990 | static void * | |
991 | tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | |
992 | { | |
13938ca7 MM |
993 | int my_nasid; |
994 | cnodeid_t my_cnode, mem_cnode; | |
c9221da9 | 995 | struct tioce_common *tioce_common; |
13938ca7 MM |
996 | struct tioce_kernel *tioce_kern; |
997 | struct tioce *tioce_mmr; | |
c9221da9 MM |
998 | |
999 | /* | |
1000 | * Allocate kernel bus soft and copy from prom. | |
1001 | */ | |
1002 | ||
baf47fb6 | 1003 | tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL); |
c9221da9 MM |
1004 | if (!tioce_common) |
1005 | return NULL; | |
1006 | ||
1007 | memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); | |
1008 | tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET; | |
1009 | ||
13938ca7 MM |
1010 | tioce_kern = tioce_kern_init(tioce_common); |
1011 | if (tioce_kern == NULL) { | |
c9221da9 MM |
1012 | kfree(tioce_common); |
1013 | return NULL; | |
1014 | } | |
1015 | ||
13938ca7 MM |
1016 | /* |
1017 | * Clear out any transient errors before registering the error | |
1018 | * interrupt handler. | |
1019 | */ | |
1020 | ||
1021 | tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; | |
1022 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL); | |
1023 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias, | |
1024 | ~0ULL); | |
f640f94e | 1025 | tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL); |
13938ca7 | 1026 | |
c9221da9 MM |
1027 | if (request_irq(SGI_PCIASIC_ERROR, |
1028 | tioce_error_intr_handler, | |
121a4226 | 1029 | IRQF_SHARED, "TIOCE error", (void *)tioce_common)) |
c9221da9 MM |
1030 | printk(KERN_WARNING |
1031 | "%s: Unable to get irq %d. " | |
1032 | "Error interrupts won't be routed for " | |
1033 | "TIOCE bus %04x:%02x\n", | |
1034 | __FUNCTION__, SGI_PCIASIC_ERROR, | |
1035 | tioce_common->ce_pcibus.bs_persist_segment, | |
1036 | tioce_common->ce_pcibus.bs_persist_busnum); | |
1037 | ||
13938ca7 MM |
1038 | /* |
1039 | * identify closest nasid for memory allocations | |
1040 | */ | |
1041 | ||
1042 | my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base); | |
1043 | my_cnode = nasid_to_cnodeid(my_nasid); | |
1044 | ||
1045 | if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) { | |
1046 | printk(KERN_WARNING "tioce_bus_fixup: failed to find " | |
1047 | "closest node with MEM to TIO node %d\n", my_cnode); | |
1048 | mem_cnode = (cnodeid_t)-1; /* use any node */ | |
1049 | } | |
1050 | ||
1051 | controller->node = mem_cnode; | |
1052 | ||
c9221da9 MM |
1053 | return tioce_common; |
1054 | } | |
1055 | ||
1056 | static struct sn_pcibus_provider tioce_pci_interfaces = { | |
1057 | .dma_map = tioce_dma, | |
1058 | .dma_map_consistent = tioce_dma_consistent, | |
1059 | .dma_unmap = tioce_dma_unmap, | |
1060 | .bus_fixup = tioce_bus_fixup, | |
8409668b MM |
1061 | .force_interrupt = tioce_force_interrupt, |
1062 | .target_interrupt = tioce_target_interrupt | |
c9221da9 MM |
1063 | }; |
1064 | ||
/**
 * tioce_init_provider - init SN PCI provider ops for TIO CE
 *
 * Installs the tioce_pci_interfaces ops table into the global
 * sn_pci_provider[] slot for the TIOCE ASIC type.  Always returns 0.
 */
int
tioce_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
	return 0;
}