/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce_provider.h>

/*
 * 1/26/2006
 *
 * WAR for SGI PV 944642. For revA TIOCE, need to use the following recipe
 * (taken from the above PV) before and after accessing tioce internal MMRs
 * to avoid tioce lockups.
 *
 * The recipe as taken from the PV:
 *
 *	if (mmr address < 0x45000) {
 *		if (mmr address == 0 or 0x80)
 *			mmr wrt or read address 0xc0
 *		else if (mmr address == 0x148 or 0x200)
 *			mmr wrt or read address 0x28
 *		else
 *			mmr wrt or read address 0x158
 *
 *		do desired mmr access (rd or wrt)
 *
 *		if (mmr address == 0x100)
 *			mmr wrt or read address 0x38
 *		mmr wrt or read address 0xb050
 *	} else
 *		do desired mmr access
 *
 * According to hw, we can use reads instead of writes to the above addresses.
 *
 * Note this WAR can only be used for accessing internal MMRs in the
 * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff. This includes the
 * "Local CE Registers and Memories" and "PCI Compatible Config Space" address
 * spaces from table 2-1 of the "CE Programmer's Reference Overview" document.
 *
 * All registers defined in struct tioce will meet that criterion.
 */

static inline void
tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
	u64 mmr_base;
	u64 mmr_offset;

	if (kern->ce_common->ce_rev != TIOCE_REV_A)
		return;

	mmr_base = kern->ce_common->ce_pcibus.bs_base;
	mmr_offset = (unsigned long)mmr_addr - mmr_base;

	if (mmr_offset < 0x45000) {
		u64 mmr_war_offset;

		if (mmr_offset == 0 || mmr_offset == 0x80)
			mmr_war_offset = 0xc0;
		else if (mmr_offset == 0x148 || mmr_offset == 0x200)
			mmr_war_offset = 0x28;
		else
			mmr_war_offset = 0x158;

		readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
	}
}

static inline void
tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr)
{
	u64 mmr_base;
	u64 mmr_offset;

	if (kern->ce_common->ce_rev != TIOCE_REV_A)
		return;

	mmr_base = kern->ce_common->ce_pcibus.bs_base;
	mmr_offset = (unsigned long)mmr_addr - mmr_base;

	if (mmr_offset < 0x45000) {
		if (mmr_offset == 0x100)
			readq_relaxed((void __iomem *)(mmr_base + 0x38));
		readq_relaxed((void __iomem *)(mmr_base + 0xb050));
	}
}
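
/*
 * All tioce MMR accesses in this file funnel through the wrappers below
 * so that the rev-A pre/post WAR sequences above are applied uniformly.
 */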

/* load mmr contents into a variable */
#define tioce_mmr_load(kern, mmrp, varp) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	*(varp) = readq_relaxed(mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* store variable contents into mmr */
#define tioce_mmr_store(kern, mmrp, varp) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(*(varp), mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* store immediate value into mmr */
#define tioce_mmr_storei(kern, mmrp, val) do { \
	tioce_mmr_war_pre(kern, mmrp); \
	writeq(val, mmrp); \
	tioce_mmr_war_post(kern, mmrp); \
} while (0)

/* set bits (immediate value) in mmr */
#define tioce_mmr_seti(kern, mmrp, bits) do { \
	u64 tmp; \
	tioce_mmr_load(kern, mmrp, &tmp); \
	tmp |= (bits); \
	tioce_mmr_store(kern, mmrp, &tmp); \
} while (0)

/* clear bits (immediate value) in mmr */
#define tioce_mmr_clri(kern, mmrp, bits) do { \
	u64 tmp; \
	tioce_mmr_load(kern, mmrp, &tmp); \
	tmp &= ~(bits); \
	tioce_mmr_store(kern, mmrp, &tmp); \
} while (0)

/*
 * Bus address ranges for the 5 flavors of TIOCE DMA
 */

#define TIOCE_D64_MIN	0x8000000000000000UL
#define TIOCE_D64_MAX	0xffffffffffffffffUL
#define TIOCE_D64_ADDR(a)	((a) >= TIOCE_D64_MIN)

#define TIOCE_D32_MIN	0x0000000080000000UL
#define TIOCE_D32_MAX	0x00000000ffffffffUL
#define TIOCE_D32_ADDR(a)	((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX)

#define TIOCE_M32_MIN	0x0000000000000000UL
#define TIOCE_M32_MAX	0x000000007fffffffUL
#define TIOCE_M32_ADDR(a)	((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX)

#define TIOCE_M40_MIN	0x0000004000000000UL
#define TIOCE_M40_MAX	0x0000007fffffffffUL
#define TIOCE_M40_ADDR(a)	((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX)

#define TIOCE_M40S_MIN	0x0000008000000000UL
#define TIOCE_M40S_MAX	0x000000ffffffffffUL
#define TIOCE_M40S_ADDR(a)	((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX)

/*
 * ATE manipulation macros.
 */

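/*
 * The ATE page sizes used here are all powers of two, so __ffs(ps) gives
 * the page shift directly: for the 256KB (0x40000) ate3240 page size, for
 * example, ATE_PAGESHIFT(ps) is 18 and ATE_PAGEMASK(ps) is 0x3ffff.
 */
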
#define ATE_PAGESHIFT(ps)	(__ffs(ps))
#define ATE_PAGEMASK(ps)	((ps)-1)

#define ATE_PAGE(x, ps)	((x) >> ATE_PAGESHIFT(ps))
#define ATE_NPAGES(start, len, pagesize) \
	(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)

#define ATE_VALID(ate)	((ate) & (1UL << 63))
#define ATE_MAKE(addr, ps, msi) \
	(((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi) ? (1UL << 62) : 0))
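
/*
 * An ATE therefore carries the coretalk page address in its low bits,
 * with bit 63 as the valid bit and bit 62 enabling MSI decode (only
 * honored for M32 mappings; see tioce_alloc_map()).
 */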

/*
 * Flavors of ate-based mapping supported by tioce_alloc_map()
 */

#define TIOCE_ATE_M32	1
#define TIOCE_ATE_M40	2
#define TIOCE_ATE_M40S	3

#define KB(x)	((u64)(x) << 10)
#define MB(x)	((u64)(x) << 20)
#define GB(x)	((u64)(x) << 30)

/**
 * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @ct_addr: system coretalk address
 * @dma_flags: SN DMA flags (SN_DMA_MSI selects MSI decode)
 *
 * Map @ct_addr into 64-bit CE bus space. No device context is necessary
 * and no CE mappings are consumed.
 *
 * Bits 53:0 come from the coretalk address. The remaining bits are set as
 * follows:
 *
 * 63 - must be 1 to indicate d64 mode to CE hardware
 * 62 - barrier bit ... controlled with tioce_dma_barrier()
 * 61 - msi bit ... specified through dma_flags
 * 60:54 - reserved, MBZ
 */
static u64
tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{
	u64 bus_addr;

	bus_addr = ct_addr | (1UL << 63);
	if (dma_flags & SN_DMA_MSI)
		bus_addr |= (1UL << 61);

	return bus_addr;
}

/**
 * pcidev_to_tioce - return misc ce related pointers given a pci_dev
 * @pdev: pci device context
 * @base: ptr to store struct tioce __iomem * for the CE holding this device
 * @kernel: ptr to store struct tioce_kernel * for the CE holding this device
 * @port: ptr to store the CE port number that this device is on
 *
 * Return pointers to various CE-related structures for the CE upstream of
 * @pdev.
 */
static inline void
pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
		struct tioce_kernel **kernel, int *port)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kernel;

	pcidev_info = SN_PCIDEV_INFO(pdev);
	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private;

	if (base)
		*base = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
	if (kernel)
		*kernel = ce_kernel;

	/*
	 * we use port as a zero-based value internally, even though the
	 * documentation is 1-based.
	 */
	if (port)
		*port =
		    (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1;
}

/**
 * tioce_alloc_map - Given a coretalk address, map it to pcie bus address
 * space using one of the various ATE-based address modes.
 * @ce_kern: tioce context
 * @type: map mode to use
 * @port: 0-based port that the requesting device is downstream of
 * @ct_addr: the coretalk address to map
 * @len: number of bytes to map
 * @dma_flags: SN DMA flags
 *
 * Given the addressing type, set up various parameters that define the
 * ATE pool to use. Search for a contiguous block of entries to cover the
 * length, and if enough resources exist, fill in the ATEs and construct a
 * tioce_dmamap struct to track the mapping.
 */
static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
		u64 ct_addr, int len, int dma_flags)
{
	int i;
	int j;
	int first;
	int last;
	int entries;
	int nates;
	u64 pagesize;
	int msi_capable, msi_wanted;
	u64 *ate_shadow;
	u64 __iomem *ate_reg;
	u64 addr;
	struct tioce __iomem *ce_mmr;
	u64 bus_base;
	struct tioce_dmamap *map;

	ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;

	switch (type) {
	case TIOCE_ATE_M32:
		/*
		 * The first 64 entries of the ate3240 pool are dedicated to
		 * super-page (TIOCE_ATE_M40S) mode.
		 */
		first = 64;
		entries = TIOCE_NUM_M3240_ATES - 64;
		ate_shadow = ce_kern->ce_ate3240_shadow;
		ate_reg = ce_mmr->ce_ure_ate3240;
		pagesize = ce_kern->ce_ate3240_pagesize;
		bus_base = TIOCE_M32_MIN;
		msi_capable = 1;
		break;
	case TIOCE_ATE_M40:
		first = 0;
		entries = TIOCE_NUM_M40_ATES;
		ate_shadow = ce_kern->ce_ate40_shadow;
		ate_reg = ce_mmr->ce_ure_ate40;
		pagesize = MB(64);
		bus_base = TIOCE_M40_MIN;
		msi_capable = 0;
		break;
	case TIOCE_ATE_M40S:
		/*
		 * ate3240 entries 0-31 are dedicated to port1 super-page
		 * mappings. ate3240 entries 32-63 are dedicated to port2.
		 */
		first = port * 32;
		entries = 32;
		ate_shadow = ce_kern->ce_ate3240_shadow;
		ate_reg = ce_mmr->ce_ure_ate3240;
		pagesize = GB(16);
		bus_base = TIOCE_M40S_MIN;
		msi_capable = 0;
		break;
	default:
		return 0;
	}

	msi_wanted = dma_flags & SN_DMA_MSI;
	if (msi_wanted && !msi_capable)
		return 0;

	nates = ATE_NPAGES(ct_addr, len, pagesize);
	if (nates > entries)
		return 0;

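	/*
	 * First-fit search of the shadow array for a run of nates
	 * consecutive free (invalid) entries: i walks the candidate
	 * starting slots, j probes the run for an entry already in use.
	 */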
	last = first + entries - nates;
	for (i = first; i <= last; i++) {
		if (ATE_VALID(ate_shadow[i]))
			continue;

		for (j = i; j < i + nates; j++)
			if (ATE_VALID(ate_shadow[j]))
				break;

		if (j >= i + nates)
			break;
	}

	if (i > last)
		return 0;

	map = kzalloc(sizeof(struct tioce_dmamap), GFP_ATOMIC);
	if (!map)
		return 0;

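	/*
	 * Fill in the hardware ATEs and their shadow copies; searches walk
	 * only the in-memory shadow, sparing the MMR reads (and the rev-A
	 * WAR overhead) that scanning the registers themselves would incur.
	 */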
	addr = ct_addr;
	for (j = 0; j < nates; j++) {
		u64 ate;

		ate = ATE_MAKE(addr, pagesize, msi_wanted);
		ate_shadow[i + j] = ate;
		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
		addr += pagesize;
	}

	map->refcnt = 1;
	map->nbytes = nates * pagesize;
	map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize);
	map->pci_start = bus_base + (i * pagesize);
	map->ate_hw = &ate_reg[i];
	map->ate_shadow = &ate_shadow[i];
	map->ate_count = nates;

	list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list);

	return (map->pci_start + (ct_addr - map->ct_start));
}

/**
 * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @ct_addr: system coretalk address
 * @dma_flags: SN DMA flags
 *
 * Map @ct_addr into 32-bit bus space of the CE upstream of @pdev.
 */
static u64
tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
{
	int dma_ok;
	int port;
	struct tioce __iomem *ce_mmr;
	struct tioce_kernel *ce_kern;
	u64 ct_upper;
	u64 ct_lower;
	dma_addr_t bus_addr;

	if (dma_flags & SN_DMA_MSI)
		return 0;

	ct_upper = ct_addr & ~0x3fffffffUL;
	ct_lower = ct_addr & 0x3fffffffUL;

	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);

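	/*
	 * D32 decodes a single 1GB-aligned coretalk window per port:
	 * ct_upper selects the window programmed into ce_ure_dir_map and
	 * ct_lower is the offset within it, so a new mapping can coexist
	 * with existing ones only if it shares the same upper bits.
	 */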
	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
		u64 tmp;

		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
				 ct_upper);
		tmp = ce_mmr->ce_ure_dir_map[port];
		dma_ok = 1;
	} else
		dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper);

	if (dma_ok) {
		ce_kern->ce_port[port].dirmap_refcnt++;
		bus_addr = TIOCE_D32_MIN + ct_lower;
	} else
		bus_addr = 0;

	return bus_addr;
}

/**
 * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude
 * the barrier bit.
 * @bus_addr: bus address to swizzle
 * @on: nonzero to set the barrier bit, zero to clear it
 *
 * Given a TIOCE bus address, set the appropriate bit to indicate barrier
 * attributes.
 */
static u64
tioce_dma_barrier(u64 bus_addr, int on)
{
	u64 barrier_bit;

	/* barrier not supported in M40/M40S mode */
	if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
		return bus_addr;

	if (TIOCE_D64_ADDR(bus_addr))
		barrier_bit = (1UL << 62);
	else	/* must be m32 or d32 */
		barrier_bit = (1UL << 30);

	return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit);
}

/**
 * tioce_dma_unmap - release CE mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioce_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes there are no resources
 * to release.
 */
void
tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
	int i;
	int port;
	struct tioce_kernel *ce_kern;
	struct tioce __iomem *ce_mmr;
	unsigned long flags;

	bus_addr = tioce_dma_barrier(bus_addr, 0);
	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);

	/* nothing to do for D64 */

	if (TIOCE_D64_ADDR(bus_addr))
		return;

	spin_lock_irqsave(&ce_kern->ce_lock, flags);

	if (TIOCE_D32_ADDR(bus_addr)) {
		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
			ce_kern->ce_port[port].dirmap_shadow = 0;
			tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
					 0);
		}
	} else {
		struct tioce_dmamap *map;

		list_for_each_entry(map, &ce_kern->ce_dmamap_list,
				    ce_dmamap_list) {
			u64 last;

			last = map->pci_start + map->nbytes - 1;
			if (bus_addr >= map->pci_start && bus_addr <= last)
				break;
		}

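		/*
		 * If the iterator ran off the end of the list without a
		 * match, map points at the list head rather than at a real
		 * tioce_dmamap, hence the head-pointer comparison below.
		 */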
		if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
			printk(KERN_WARNING
			       "%s: %s - no map found for bus_addr 0x%lx\n",
			       __func__, pci_name(pdev), bus_addr);
		} else if (--map->refcnt == 0) {
			for (i = 0; i < map->ate_count; i++) {
				map->ate_shadow[i] = 0;
				tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0);
			}

			list_del(&map->ce_dmamap_list);
			kfree(map);
		}
	}

	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
}

/**
 * tioce_do_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 * @barrier: nonzero if a barrier (consistent) mapping is wanted
 * @dma_flags: SN DMA flags
 *
 * This is the main wrapper for mapping host physical pages to CE PCI space.
 * The mapping mode used is based on the device's dma_mask; modes are tried
 * in order of decreasing reach: D64 direct, reuse of an existing ATE map,
 * the M40/M40S ATE modes, D32 direct, and finally M32 ATEs.
 */
static u64
tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
		 int barrier, int dma_flags)
{
	unsigned long flags;
	u64 ct_addr;
	u64 mapaddr = 0;
	struct tioce_kernel *ce_kern;
	struct tioce_dmamap *map;
	int port;
	u64 dma_mask;

	dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask;

	/* cards must be able to address at least 31 bits */
	if (dma_mask < 0x7fffffffUL)
		return 0;

	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
		ct_addr = PHYS_TO_TIODMA(paddr);
	else
		ct_addr = paddr;

	/*
	 * If the device can generate 64 bit addresses, create a D64 map.
	 */
	if (dma_mask == ~0UL) {
		mapaddr = tioce_dma_d64(ct_addr, dma_flags);
		if (mapaddr)
			goto dma_map_done;
	}

	pcidev_to_tioce(pdev, NULL, &ce_kern, &port);

	spin_lock_irqsave(&ce_kern->ce_lock, flags);

	/*
	 * D64 didn't work ... See if we have an existing map that covers
	 * this address range. Must account for the device's dma_mask here,
	 * since an existing map might have been done in a mode using more
	 * pci address bits than this device can support.
	 */
	list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) {
		u64 last;

		last = map->ct_start + map->nbytes - 1;
		if (ct_addr >= map->ct_start &&
		    ct_addr + byte_count - 1 <= last &&
		    map->pci_start <= dma_mask) {
			map->refcnt++;
			mapaddr = map->pci_start + (ct_addr - map->ct_start);
			break;
		}
	}

	/*
	 * If we don't have a map yet, and the card can generate 40
	 * bit addresses, try the M40/M40S modes. Note these modes do not
	 * support a barrier bit, so if we need a consistent map these
	 * won't work.
	 */
	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
		/*
		 * We have two options for 40-bit mappings: 16GB "super" ATEs
		 * and 64MB "regular" ATEs. We'll try both if needed for a
		 * given mapping but which one we try first depends on the
		 * size. For requests >64MB, prefer to use a super page with
		 * regular as the fallback. Otherwise, try in the reverse order.
		 */

		if (byte_count > MB(64)) {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						  port, ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						    ct_addr, byte_count,
						    dma_flags);
		} else {
			mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
						  ct_addr, byte_count,
						  dma_flags);
			if (!mapaddr)
				mapaddr =
				    tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
						    port, ct_addr, byte_count,
						    dma_flags);
		}
	}

	/*
	 * 32-bit direct is the next mode to try
	 */
	if (!mapaddr && dma_mask >= 0xffffffffUL)
		mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);

	/*
	 * Last resort, try 32-bit ATE-based map.
	 */
	if (!mapaddr)
		mapaddr =
		    tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
				    byte_count, dma_flags);

	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);

dma_map_done:
	if (mapaddr && barrier)
		mapaddr = tioce_dma_barrier(mapaddr, 1);

	return mapaddr;
}

/**
 * tioce_dma - standard pci dma map interface
 * @pdev: pci device requesting the map
 * @paddr: system physical address to map into pci space
 * @byte_count: # bytes to map
 * @dma_flags: SN DMA flags
 *
 * Simply call tioce_do_dma_map() to create a map with the barrier bit clear
 * in the address.
 */
static u64
tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}

/**
 * tioce_dma_consistent - consistent pci dma map interface
 * @pdev: pci device requesting the map
 * @paddr: system physical address to map into pci space
 * @byte_count: # bytes to map
 * @dma_flags: SN DMA flags
 *
 * Simply call tioce_do_dma_map() to create a map with the barrier bit set
 * in the address.
 */
static u64
tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count,
		     int dma_flags)
{
	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}

/**
 * tioce_error_intr_handler - SGI TIO CE error interrupt handler
 * @irq: unused
 * @arg: pointer to tioce_common struct for the given CE
 *
 * Handle a CE error interrupt. Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioce_error_intr_handler(int irq, void *arg)
{
	struct tioce_common *soft = arg;
	struct ia64_sal_retval ret_stuff;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			soft->ce_pcibus.bs_persist_segment,
			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);

	if (ret_stuff.v0)
		panic("tioce_error_intr_handler: Fatal TIOCE error");

	return IRQ_HANDLED;
}

/**
 * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
 * @ce_kern: TIOCE context to reserve ATEs for
 * @base: starting bus address to reserve
 * @limit: last bus address to reserve
 *
 * If base/limit falls within the range of bus space mapped through the
 * M32 space, reserve the resources corresponding to the range.
 */
static void
tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
{
	int ate_index, last_ate, ps;
	struct tioce __iomem *ce_mmr;

	ce_mmr = (struct tioce __iomem *)ce_kern->ce_common->ce_pcibus.bs_base;
	ps = ce_kern->ce_ate3240_pagesize;
	ate_index = ATE_PAGE(base, ps);
	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;

	if (ate_index < 64)
		ate_index = 64;

	if (last_ate >= TIOCE_NUM_M3240_ATES)
		last_ate = TIOCE_NUM_M3240_ATES - 1;

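	/*
	 * Each reserved entry is written valid but pointing at a dummy
	 * coretalk page; since tioce_alloc_map() skips any ATE whose valid
	 * bit is set, these pages can never be handed out for DMA.
	 */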
	while (ate_index <= last_ate) {
		u64 ate;

		ate = ATE_MAKE(0xdeadbeef, ps, 0);
		ce_kern->ce_ate3240_shadow[ate_index] = ate;
		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
				 ate);
		ate_index++;
	}
}

/**
 * tioce_kern_init - init kernel structures related to a given TIOCE
 * @tioce_common: ptr to a cached tioce_common struct that originated in prom
 */
static struct tioce_kernel *
tioce_kern_init(struct tioce_common *tioce_common)
{
	int i;
	int ps;
	int dev;
	u32 tmp;
	unsigned int seg, bus;
	struct tioce __iomem *tioce_mmr;
	struct tioce_kernel *tioce_kern;

	tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
	if (!tioce_kern) {
		return NULL;
	}

	tioce_kern->ce_common = tioce_common;
	spin_lock_init(&tioce_kern->ce_lock);
	INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list);
	tioce_common->ce_kernel_private = (u64) tioce_kern;

	/*
	 * Determine the secondary bus number of the port2 logical PPB.
	 * This is used to decide whether a given pci device resides on
	 * port1 or port2. Note: We don't have enough plumbing set up
	 * here to use pci_read_config_xxx() so use raw_pci_read().
	 */

	seg = tioce_common->ce_pcibus.bs_persist_segment;
	bus = tioce_common->ce_pcibus.bs_persist_busnum;

	raw_pci_read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
	tioce_kern->ce_port1_secondary = (u8) tmp;

	/*
	 * Set PMU pagesize to the largest size available, and zero out
	 * the ATEs.
	 */

	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
	tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_PAGESIZE_MASK);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
		       CE_URE_256K_PAGESIZE);
	ps = tioce_kern->ce_ate3240_pagesize = KB(256);

	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
		tioce_kern->ce_ate40_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
	}

	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
		tioce_kern->ce_ate3240_shadow[i] = 0;
		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
	}

	/*
	 * Reserve ATEs corresponding to reserved address ranges. These
	 * include:
	 *
	 * Memory space covered by each PPB mem base/limit register
	 * Memory space covered by each PPB prefetch base/limit register
	 *
	 * These bus ranges are for pio (downstream) traffic only, and so
	 * cannot be used for DMA.
	 */

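	/*
	 * PPB mem base/limit registers hold the upper 16 bits of 32-bit
	 * window addresses, hence the << 16 below; limits are 1MB granular,
	 * so a limit's low 20 bits are implied 1s (the 0xfffff fill).
	 */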
	for (dev = 1; dev <= 2; dev++) {
		u64 base, limit;

		/* mem base/limit */

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_MEMORY_BASE, 2, &tmp);
		base = (u64)tmp << 16;

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_MEMORY_LIMIT, 2, &tmp);
		limit = (u64)tmp << 16;
		limit |= 0xfffffUL;

		if (base < limit)
			tioce_reserve_m32(tioce_kern, base, limit);

		/*
		 * prefetch mem base/limit. The tioce ppb's have 64-bit
		 * decoders, so read the upper portions w/o checking the
		 * attributes.
		 */

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_PREF_MEMORY_BASE, 2, &tmp);
		base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_PREF_BASE_UPPER32, 4, &tmp);
		base |= (u64)tmp << 32;

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_PREF_MEMORY_LIMIT, 2, &tmp);
		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
		limit |= 0xfffffUL;

		raw_pci_read(seg, bus, PCI_DEVFN(dev, 0),
			     PCI_PREF_LIMIT_UPPER32, 4, &tmp);
		limit |= (u64)tmp << 32;

		if ((base < limit) && TIOCE_M32_ADDR(base))
			tioce_reserve_m32(tioce_kern, base, limit);
	}

	return tioce_kern;
}

/**
 * tioce_force_interrupt - implement altix force_interrupt() backend for CE
 * @sn_irq_info: sn asic irq that we need an interrupt generated for
 *
 * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to
 * force a secondary interrupt to be generated. This is to work around an
 * asic issue where there is a small window of opportunity for a legacy device
 * interrupt to be lost.
 */
static void
tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce __iomem *ce_mmr;
	u64 force_int_val;

	if (!sn_irq_info->irq_bridge)
		return;

	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

	/*
	 * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
	 * the TIO_INTx register directly (1/26/2006)
	 */
	if (ce_common->ce_rev == TIOCE_REV_A) {
		u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
		u64 status;

		tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
		if (status & int_bit_mask) {
			u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
			u64 ctalk = sn_irq_info->irq_xtalkaddr;
			u64 nasid, offset;

			nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
			offset = (ctalk & CTALK_NODE_OFFSET);
			HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
		}

		return;
	}

	/*
	 * irq_int_bit is originally set up by prom, and holds the interrupt
	 * bit shift (not mask) as defined by the bit definitions in the
	 * ce_adm_int mmr. These shifts are not the same for the
	 * ce_adm_force_int register, so do an explicit mapping here to make
	 * things clearer.
	 */

	switch (sn_irq_info->irq_int_bit) {
	case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT;
		break;
	case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT:
		force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT;
		break;
	default:
		return;
	}
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
}

/**
 * tioce_target_interrupt - implement set_irq_affinity for tioce resident
 * functions. Note: only applies to line interrupts, not MSIs.
 * @sn_irq_info: SN IRQ context
 *
 * Given an sn_irq_info, set the associated CE device's interrupt destination
 * register. Since the interrupt destination registers are on a per-ce-slot
 * basis, this will retarget line interrupts for all functions downstream of
 * the slot.
 */
static void
tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	struct tioce_common *ce_common;
	struct tioce_kernel *ce_kern;
	struct tioce __iomem *ce_mmr;
	int bit;
	u64 vector;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
	ce_mmr = (struct tioce __iomem *)ce_common->ce_pcibus.bs_base;
	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;

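	/*
	 * Mask the interrupt while its destination register is rewritten,
	 * then unmask once the new vector and target are in place.
	 */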
	bit = sn_irq_info->irq_int_bit;

	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
	vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
	vector |= sn_irq_info->irq_xtalkaddr;
	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));

	tioce_force_interrupt(sn_irq_info);
}

/**
 * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 * @controller: associated pci_controller
 *
 * Replicates the tioce_common pointed to by @prom_bussoft in kernel
 * space. Allocates and initializes a kernel-only area for a given CE,
 * and sets up an irq for handling CE error interrupts.
 *
 * On successful setup, returns the kernel version of tioce_common back to
 * the caller.
 */
static void *
tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft,
		struct pci_controller *controller)
{
	struct tioce_common *tioce_common;
	struct tioce_kernel *tioce_kern;
	struct tioce __iomem *tioce_mmr;

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL);
	if (!tioce_common)
		return NULL;

	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
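
	/*
	 * The prom-supplied bs_base is a region-based address; strip the
	 * region bits and ioremap it so the kernel copy holds a usable
	 * virtual address for the CE's MMR space.
	 */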
	tioce_common->ce_pcibus.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(tioce_common->ce_pcibus.bs_base),
			sizeof(struct tioce_common));

	tioce_kern = tioce_kern_init(tioce_common);
	if (tioce_kern == NULL) {
		kfree(tioce_common);
		return NULL;
	}

	/*
	 * Clear out any transient errors before registering the error
	 * interrupt handler.
	 */

	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
		       ~0ULL);
	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);

	if (request_irq(SGI_PCIASIC_ERROR,
			tioce_error_intr_handler,
			IRQF_SHARED, "TIOCE error", (void *)tioce_common))
		printk(KERN_WARNING
		       "%s: Unable to get irq %d. "
		       "Error interrupts won't be routed for "
		       "TIOCE bus %04x:%02x\n",
		       __func__, SGI_PCIASIC_ERROR,
		       tioce_common->ce_pcibus.bs_persist_segment,
		       tioce_common->ce_pcibus.bs_persist_busnum);

	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
	return tioce_common;
}

static struct sn_pcibus_provider tioce_pci_interfaces = {
	.dma_map = tioce_dma,
	.dma_map_consistent = tioce_dma_consistent,
	.dma_unmap = tioce_dma_unmap,
	.bus_fixup = tioce_bus_fixup,
	.force_interrupt = tioce_force_interrupt,
	.target_interrupt = tioce_target_interrupt
};

/**
 * tioce_init_provider - init SN PCI provider ops for TIO CE
 */
int
tioce_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces;
	return 0;
}