/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>		/* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if(!(expr)) { \
		printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64

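/*
** For example (illustration only): with DELAYED_RESOURCE_CNT == 64,
** sba_unmap_page() queues freed (iova, size) pairs in ioc->saved[] and
** only on the 64th free invalidates the whole batch and issues a single
** posted-write-flushing READ_REG(ioc->ioc_hpa + IOC_PCOM), amortizing
** the costly uncached read over 64 unmaps.
*/
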
#define PCI_DEVICE_ID_HP_SX2000_IOC        0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */


/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

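/*
** For example (illustration only): at a 4K I/O page size,
** iovp_size == 0x1000, iovp_shift == 12 and iovp_mask == ~0xFFFUL,
** so an IOVA's low 12 bits are the byte offset and the upper bits
** select the pdir entry.
*/
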
struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char 	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)	(((dev)->bus == &pci_bus_type) \
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)	NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))

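/*
** For example (illustration only): with BITS_PER_LONG == 64 and a 4K
** iovp_size, DMA_CHUNK_SIZE is 256KB -- one resource-map word worth of
** I/O pages.  ROUNDUP(0x1234, 0x1000) == 0x2000; note y must be a
** power of two for the add-then-mask trick to work.
*/
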
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)

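/*
** For example, the flush idiom used throughout this file: because
** writes are posted, a purge is forced out to HW by reading the same
** register back:
**
**	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);	(the read flushes the posted write)
*/
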
#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

#endif /* ASSERT_PDIR_SANITY */


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset)	((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova)		((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)	((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

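/*
** For example (values for illustration only): with iovp_shift == 12
** and ibase == 0x40000000, pdir index 5 covers IOVP 0x5000, so
** SBA_IOVA(ioc, 0x5000, 0x234) == 0x40005234 and SBA_IOVP() strips
** the ibase bits back off.  RESMAP_MASK(3) == 0x7, i.e. a run of
** three pdir entries in the resource bitmap.
*/
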
/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}

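/*
** Worked example (illustration only): with iovp_shift == 12, a 24KB
** request gives d = 0x5FFF.  ia64_getf_exp() returns the biased
** exponent 0xffff + 14, so order = 14 - 12 + 1 = 3, i.e. the request
** is rounded up to 2^3 = 8 I/O pages (32KB) -- a power-of-two size,
** as the AR2305 note in sba_search_bitmap() requires.
*/
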
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;

	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;     /* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;

		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			bits_not_wanted = 0;

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
		}
	}
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba)	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif

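/*
** Worked example (illustration only): for an identity-mapped kernel
** address vba == 0xE000000000123ABC, masking with ~0xE000000000000FFF
** strips the region bits and the page offset, leaving the physical
** page 0x0000000000123000; OR-ing in 0x8000000000000000 sets the
** Valid bit, so the pdir entry becomes 0x8000000000123000.
*/
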
#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}

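/*
** PCOM encoding example (illustration only): purging a single 4K page
** at IOVP 0x5000 with iovp_shift == 12 writes 0x5000 | 12 | ibase to
** IOC_PCOM -- the low bits carry log2 of the purge size.  An 8-page
** (32KB) purge at a 32KB-aligned IOVP would use a size field of 15.
*/
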
/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page holding the driver buffer to map.
 * @poff: offset of the buffer within @page.
 * @size: number of bytes to map in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is able to DMA to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
		           to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return 0;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}

static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       struct dma_attrs *attrs)
{
	return sba_map_page(dev, virt_to_page(addr),
			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32	iovp = (u32) SBA_IOVP(ioc, iova);
	int	off = PDIR_INDEX(iovp);
	void	*addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
		                    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
			                    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}
#endif

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flags, struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		struct page *page;
		page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
		                        numa_node_id() : ioc->node, flags,
		                        get_order(size));

		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
		           dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
					   size, 0, NULL);

	return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, we can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

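/*
** For example (illustration only): sba_coalesce_chunks() marks the head
** of each DMA stream by storing PIDE_FLAG | (pide << iovp_shift) | offset
** in dma_address, so even pdir index 0 at offset 0 yields the non-zero
** value 0x1, which sba_fill_pdir() can recognize as a stream head.
*/
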
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */

static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int     cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)

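/*
** For example (illustration only): with iovp_shift == 12,
** DMA_CONTIG(0x10003000, 0x10003000) is true (both values have zero
** low 12 bits, so nothing survives the shift left by
** BITS_PER_LONG - 12), while DMA_CONTIG(0x10003000, 0x10003800) is
** false -- the 0x800 offset survives the shift.
*/
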

/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end +=  vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if  (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo successive chunks
			** must start on page boundaries and dovetail with
			** their predecessor.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			dma_sg->dma_length = 0;
			return -1;
		}
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}

static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {

		sba_unmap_single_attrs(dev, sglist->dma_address,
				       sglist->dma_length, dir, attrs);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void __init
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}

static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;  /* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
							      | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}

static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}

static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i=0; i<(8*8); i+=8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};
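/*
** Adding support for a new IOC variant is a table edit.  A hypothetical
** entry (MY_IOC_ID and ioc_myioc_init are illustrative names only):
**
**	{ MY_IOC_ID, "myioc", ioc_myioc_init },
**
** where MY_IOC_ID matches the value ioc_init() reads from IOC_FUNC_ID
** and the init hook, which may be NULL, applies chip-specific setup.
*/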

static struct ioc * __init
ioc_init(unsigned long hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		 PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}
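/*
** Example boot-time output (all values are illustrative only):
**
**	IOC: zx1 2.3 HPA 0xfed01000 IOVA space 1024MB at 0x40000000
*/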


/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		   ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != MAX_NUMNODES)
		seq_printf(s, "NUMA node       : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size  : %ld KB\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			   min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};
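/*
** Note (added for clarity): the seq_file core drives these hooks as
** start() -> show()/next() for each IOC on ioc_list -> stop(), so a
** single read of the /proc file emits one ioc_show() block per IOC.
*/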

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
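/*
** Example usage (illustrative): with a single zx1 IOC the file lands at
** /proc/bus/mckinley/zx1, so "cat /proc/bus/mckinley/zx1" prints the
** ioc_show() statistics above.
*/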
#endif

static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}

#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
	unsigned int node;
	int pxm;

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);

	if (pxm < 0)
		return;

	node = pxm_to_node(pxm);

	if (node >= MAX_NUMNODES || !node_online(node))
		return;

	ioc->node = node;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif

static int __init
acpi_sba_ioc_add(struct acpi_device *device,
		 const struct acpi_device_id *not_used)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	status = acpi_get_object_info(device->handle, &adi);
	if (ACPI_FAILURE(status))
		return 1;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, device->handle);
	return 0;
}

static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};

static struct acpi_scan_handler acpi_sba_ioc_handler = {
	.ids    = hp_ioc_iommu_device_ids,
	.attach = acpi_sba_ioc_add,
};

static int __init acpi_sba_ioc_init_acpi(void)
{
	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
}
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
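/*
** Note (added for clarity): initcall levels run in order, and
** arch_initcall (level 3) precedes subsys_initcall (level 4), so this
** handler is registered before the ACPI scan runs and before sba_init()
** below expects ioc_list to be populated.
*/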

extern struct dma_map_ops swiotlb_dma_ops;

static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

#if defined(CONFIG_IA64_GENERIC)
	/* If we are booting a kdump kernel, the sba_iommu will
	 * cause devices that were not shutdown properly to MCA
	 * as soon as they are turned back on.  Our only option for
	 * a successful kdump kernel boot is to use the swiotlb.
	 */
	if (is_kdump_kernel()) {
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
		return 0;
	}
#endif

	/*
	 * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
	 * routine, but that only happens if acpi_scan_init() has already run.
	 */
	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to set up the swiotlb and switch to the dig machvec.
		 */
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
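/*
** Note (added for clarity): any mask covering the full low 32 bits
** passes, e.g. DMA_BIT_MASK(32) or DMA_BIT_MASK(64), while a 24-bit
** ISA-style mask is rejected and dma_set_mask() will fail for that
** device.
*/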

static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* never an error cookie to report: mapping failures panic instead */
	return 0;
}

__setup("nosbagart", nosbagart);

static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk("%s: unknown/unsupported iommu page size %ld\n",
			       __func__, page_size);
	}

	return 1;
}

__setup("sbapagesize=", sba_page_override);
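/*
** Example usage (illustrative): booting with "sbapagesize=64k" makes
** memparse() return 65536, so iovp_shift becomes ffs(65536) - 1 = 16
** and the IOMMU maps in 64KB pages.
*/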

struct dma_map_ops sba_dma_ops = {
	.alloc			= sba_alloc_coherent,
	.free			= sba_free_coherent,
	.map_page		= sba_map_page,
	.unmap_page		= sba_unmap_page,
	.map_sg			= sba_map_sg_attrs,
	.unmap_sg		= sba_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported		= sba_dma_supported,
	.mapping_error		= sba_dma_mapping_error,
};
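/*
** Note (added for clarity): drivers never call these hooks directly.
** Once dma_ops points at this table, the generic DMA API dispatches
** here, e.g. dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE)
** resolves to sba_map_page().
*/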

void sba_dma_init(void)
{
	dma_ops = &sba_dma_ops;
}