git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
Merge branch 'linus' into x86/cleanups
author Ingo Molnar <mingo@elte.hu>
Mon, 11 Aug 2008 10:57:01 +0000 (12:57 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 11 Aug 2008 10:57:01 +0000 (12:57 +0200)
arch/x86/kernel/pci-dma.c
arch/x86/mm/init_64.c

diff --combined arch/x86/kernel/pci-dma.c
index 88ddd04cfa98da31faa558d30947a679cfa296cd,87d4d6964ec2b9ecb5d83ad01c081589218dc303..f704cb51ff82b9e04b14853c88096960f4cdc245
@@@ -11,7 -11,7 +11,7 @@@
  
  static int forbid_dac __read_mostly;
  
- const struct dma_mapping_ops *dma_ops;
+ struct dma_mapping_ops *dma_ops;
  EXPORT_SYMBOL(dma_ops);
  
  static int iommu_sac_force __read_mostly;
@@@ -82,7 -82,7 +82,7 @@@ void __init dma32_reserve_bootmem(void
         * using 512M as goal
         */
        align = 64ULL<<20;
 -      size = round_up(dma32_bootmem_size, align);
 +      size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        if (dma32_bootmem_ptr)
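(Aside: the round_up() -> roundup() switches throughout this merge are a
consolidation, not a behavior change, since every alignment involved is a
power of two. The two helpers differ only for non-power-of-two multiples;
paraphrasing the kernel-header definitions of this era as a minimal sketch:

      /* power-of-two y only: masking trick */
      #define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))

      /* any positive y: divide, then multiply back */
      #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

      /* round_up(5, 4) == roundup(5, 4) == 8, but
         round_up(5, 6) == 10 while roundup(5, 6) == 6 */

Here align is 64 MiB, so both forms compute the same size.)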
@@@ -123,6 -123,14 +123,14 @@@ void __init pci_iommu_alloc(void
  
        pci_swiotlb_init();
  }
+ 
+ unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+ {
+       unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
+ 
+       return size >> PAGE_SHIFT;
+ }
+ EXPORT_SYMBOL(iommu_num_pages);
  #endif
  
  /*
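(The new iommu_num_pages() helper counts how many pages a DMA buffer
touches, including the partial pages at either end. A hypothetical call,
not part of the patch, assuming 4 KiB pages:

      /* buffer starts 0x234 bytes into a page and is 8 KiB long */
      unsigned long n = iommu_num_pages(0x1234, 0x2000);
      /* roundup(0x234 + 0x2000, 0x1000) == 0x3000, so n == 3 */

The rounding makes it safe to map the buffer page by page.)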
@@@ -192,126 -200,10 +200,10 @@@ static __init int iommu_setup(char *p
  }
  early_param("iommu", iommu_setup);
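(iommu_setup() parses the iommu= kernel command-line option. A few of the
values it historically accepted, listed here as illustrative examples:

      iommu=off        disable IOMMU use entirely
      iommu=force      always use the IOMMU, even for memory below 4 GB
      iommu=soft       fall back to software bounce buffering (swiotlb)

See Documentation/x86_64/boot-options.txt of the same era for the full list.)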
  
- #ifdef CONFIG_X86_32
- int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                               dma_addr_t device_addr, size_t size, int flags)
- {
-       void __iomem *mem_base = NULL;
-       int pages = size >> PAGE_SHIFT;
-       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
- 
-       if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-               goto out;
-       if (!size)
-               goto out;
-       if (dev->dma_mem)
-               goto out;
- 
-       /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
- 
-       mem_base = ioremap(bus_addr, size);
-       if (!mem_base)
-               goto out;
- 
-       dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-       if (!dev->dma_mem)
-               goto out;
-       dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-       if (!dev->dma_mem->bitmap)
-               goto free1_out;
- 
-       dev->dma_mem->virt_base = mem_base;
-       dev->dma_mem->device_base = device_addr;
-       dev->dma_mem->size = pages;
-       dev->dma_mem->flags = flags;
- 
-       if (flags & DMA_MEMORY_MAP)
-               return DMA_MEMORY_MAP;
- 
-       return DMA_MEMORY_IO;
- 
-  free1_out:
-       kfree(dev->dma_mem);
-  out:
-       if (mem_base)
-               iounmap(mem_base);
-       return 0;
- }
- EXPORT_SYMBOL(dma_declare_coherent_memory);
- 
- void dma_release_declared_memory(struct device *dev)
- {
-       struct dma_coherent_mem *mem = dev->dma_mem;
- 
-       if (!mem)
-               return;
-       dev->dma_mem = NULL;
-       iounmap(mem->virt_base);
-       kfree(mem->bitmap);
-       kfree(mem);
- }
- EXPORT_SYMBOL(dma_release_declared_memory);
- 
- void *dma_mark_declared_memory_occupied(struct device *dev,
-                                       dma_addr_t device_addr, size_t size)
- {
-       struct dma_coherent_mem *mem = dev->dma_mem;
-       int pos, err;
-       int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
- 
-       pages >>= PAGE_SHIFT;
- 
-       if (!mem)
-               return ERR_PTR(-EINVAL);
- 
-       pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-       err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-       if (err != 0)
-               return ERR_PTR(err);
-       return mem->virt_base + (pos << PAGE_SHIFT);
- }
- EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
- 
- static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
-                                      dma_addr_t *dma_handle, void **ret)
- {
-       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-       int order = get_order(size);
- 
-       if (mem) {
-               int page = bitmap_find_free_region(mem->bitmap, mem->size,
-                                                    order);
-               if (page >= 0) {
-                       *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-                       *ret = mem->virt_base + (page << PAGE_SHIFT);
-                       memset(*ret, 0, size);
-               }
-               if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-                       *ret = NULL;
-       }
-       return (mem != NULL);
- }
- 
- static int dma_release_coherent(struct device *dev, int order, void *vaddr)
- {
-       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
- 
-       if (mem && vaddr >= mem->virt_base && vaddr <
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
- 
-               bitmap_release_region(mem->bitmap, page, order);
-               return 1;
-       }
-       return 0;
- }
- #else
- #define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
- #define dma_release_coherent(dev, order, vaddr) (0)
- #endif /* CONFIG_X86_32 */
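(This CONFIG_X86_32-only pool allocator is not simply deleted: the merge
picks up its generic replacement, and the call sites below switch to it.
The generic entry points, as they appeared around this time in
kernel/dma-coherent.c, had roughly these signatures:

      int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                  dma_addr_t *dma_handle, void **ret);
      int dma_release_from_coherent(struct device *dev, int order,
                                    void *vaddr);

so the per-device coherent pool becomes available on 64-bit x86 as well.)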
  int dma_supported(struct device *dev, u64 mask)
  {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
  #ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
  #endif
  
-       if (dma_ops->dma_supported)
-               return dma_ops->dma_supported(dev, mask);
+       if (ops->dma_supported)
+               return ops->dma_supported(dev, mask);
  
        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
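(Every direct dma_ops-> dereference in this file becomes ops-> obtained via
get_dma_ops(dev), which allows per-device mapping operations. A sketch of
the accessor as it looked in include/asm-x86/dma-mapping.h around this
merge, reproduced here for orientation rather than as part of the patch:

      static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
      {
      #ifdef CONFIG_X86_32
              return dma_ops;                 /* single global table */
      #else
              if (unlikely(!dev) || !dev->archdata.dma_ops)
                      return dma_ops;         /* fall back to the global table */
              else
                      return dev->archdata.dma_ops;   /* per-device override */
      #endif
      }

Storing a table in dev->archdata also explains why the const qualifier on
the global dma_ops is dropped in the first hunk: both pointers now share
one non-const type.)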
@@@ -367,6 -259,7 +259,7 @@@ void 
  dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
  {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
  
-       if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;
  
        if (!dev) {
                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);
  
-                       if (dma_ops->alloc_coherent)
-                               return dma_ops->alloc_coherent(dev, size,
+                       if (ops->alloc_coherent)
+                               return ops->alloc_coherent(dev, size,
                                                           dma_handle, gfp);
                        return NULL;
                }
                }
        }
  
-       if (dma_ops->alloc_coherent) {
+       if (ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
-               return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+               return ops->alloc_coherent(dev, size, dma_handle, gfp);
        }
  
-       if (dma_ops->map_simple) {
-               *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+       if (ops->map_simple) {
+               *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
                                              size,
                                              PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
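(The allocation fallback order in dma_alloc_coherent() is unchanged by the
renames: first the device's declared coherent pool via
dma_alloc_from_coherent(), then a full ops->alloc_coherent() implementation
if the active dma_mapping_ops provides one, and finally plain page
allocation mapped through ops->map_simple().)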
@@@ -477,12 -370,14 +370,14 @@@ EXPORT_SYMBOL(dma_alloc_coherent)
  void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t bus)
  {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
-       if (dma_release_coherent(dev, order, vaddr))
+       if (dma_release_from_coherent(dev, order, vaddr))
                return;
-       if (dma_ops->unmap_single)
-               dma_ops->unmap_single(dev, bus, size, 0);
+       if (ops->unmap_single)
+               ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
  }
  EXPORT_SYMBOL(dma_free_coherent);
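(For orientation, a hypothetical driver-side use of this API; the calling
convention is untouched by the merge, only the dispatch underneath changes:

      /* 'pdev' is an assumed struct pci_dev *, not from this patch */
      dma_addr_t bus;
      void *ring = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
      if (!ring)
              return -ENOMEM;
      /* ... program 'bus' into the device, access 'ring' from the CPU ... */
      dma_free_coherent(&pdev->dev, 4096, ring, bus);

The WARN_ON(irqs_disabled()) above is why such calls must come from
sleepable context.)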
diff --combined arch/x86/mm/init_64.c
index e4805771b5be554858f87a7608d8f486b7c5be28,129618ca0ea274a980ba414b30e604e67c3217fd..08a20e6a15c2cceb716cc7e453b58d65c5232ffd
@@@ -86,43 -86,6 +86,6 @@@ early_param("gbpages", parse_direct_gbp
   * around without checking the pgd every time.
   */
  
- void show_mem(void)
- {
-       long i, total = 0, reserved = 0;
-       long shared = 0, cached = 0;
-       struct page *page;
-       pg_data_t *pgdat;
- 
-       printk(KERN_INFO "Mem-info:\n");
-       show_free_areas();
-       for_each_online_pgdat(pgdat) {
-               for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       /*
-                        * This loop can take a while with 256 GB and
-                        * 4k pages so defer the NMI watchdog:
-                        */
-                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                               touch_nmi_watchdog();
- 
-                       if (!pfn_valid(pgdat->node_start_pfn + i))
-                               continue;
- 
-                       page = pfn_to_page(pgdat->node_start_pfn + i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-       }
- 
-       printk(KERN_INFO "%lu pages of RAM\n",          total);
-       printk(KERN_INFO "%lu reserved pages\n",        reserved);
-       printk(KERN_INFO "%lu pages shared\n",          shared);
-       printk(KERN_INFO "%lu pages swap cached\n",     cached);
- }
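(This removal mirrors the pci-dma.c cleanup above: a generic show_mem()
entered the tree around this time (lib/show_mem.c), so the x86-64 copy of
the pgdat-walking loop is dropped rather than lost.)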
  int after_bootmem;
  
  static __init void *spp_getpage(void)
@@@ -258,7 -221,7 +221,7 @@@ void __init init_extra_mapping_uc(unsig
  void __init cleanup_highmap(void)
  {
        unsigned long vaddr = __START_KERNEL_map;
 -      unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
 +      unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;
        pmd_t *last_pmd = pmd + PTRS_PER_PMD;
  
@@@ -474,14 -437,14 +437,14 @@@ static void __init find_early_table_spa
        unsigned long puds, pmds, ptes, tables, start;
  
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 -      tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
 +      tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
        if (direct_gbpages) {
                unsigned long extra;
                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
 -      tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
 +      tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
  
        if (cpu_has_pse) {
                unsigned long extra;
                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 -      tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
 +      tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
  
        /*
         * RED-PEN putting page tables only on node 0 could
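(To make find_early_table_space()'s arithmetic concrete, a hypothetical run
with end = 4 GiB (0x100000000), direct_gbpages off and PSE available:

      puds   = (0x100000000ULL + PUD_SIZE - 1) >> PUD_SHIFT;  /* = 4 */
      tables = roundup(4 * sizeof(pud_t), PAGE_SIZE);         /* 4 KiB */
      pmds   = (0x100000000ULL + PMD_SIZE - 1) >> PMD_SHIFT;  /* = 2048 */
      tables += roundup(2048 * sizeof(pmd_t), PAGE_SIZE);     /* +16 KiB */
      /* PSE maps the bulk with 2 MiB pages; 'extra' is 0 for an
         aligned end, so ptes contributes nothing */

About 20 KiB of early page-table space is reserved for this layout.)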