Merge branch 'stable/xen-swiotlb-0.8.6' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Aug 2010 16:09:41 +0000 (09:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Aug 2010 16:09:41 +0000 (09:09 -0700)
* 'stable/xen-swiotlb-0.8.6' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  x86: Detect whether we should use Xen SWIOTLB.
  pci-swiotlb-xen: Add glue code to setup dma_ops utilizing xen_swiotlb_* functions.
  swiotlb-xen: SWIOTLB library for Xen PV guest with PCI passthrough.
  xen/mmu: inhibit vmap aliases rather than trying to clear them out
  vmap: add flag to allow lazy unmap to be disabled at runtime
  xen: Add xen_create_contiguous_region
  xen: Rename the balloon lock
  xen: Allow unprivileged Xen domains to create iomap pages
  xen: use _PAGE_IOMAP in ioremap to do machine mappings

Fix up trivial conflicts (adding both xen swiotlb and xen pci platform
driver setup close to each other) in drivers/xen/{Kconfig,Makefile} and
include/xen/xen-ops.h

arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
drivers/xen/Kconfig
drivers/xen/Makefile
include/linux/vmalloc.h
include/xen/xen-ops.h
mm/vmalloc.c

diff --cc arch/x86/xen/Makefile
Simple merge
diff --cc arch/x86/xen/enlighten.c
Simple merge
diff --cc arch/x86/xen/mmu.c
index 413b19b3d0fe5322ebe5a492d432eb132304aadf,ef5728dde8f39cf77ffa96d54cac44bba3130fcc..42086ac406af21da6d281687625684c0dddbe11c
  #include <asm/xen/hypercall.h>
  #include <asm/xen/hypervisor.h>
  
+ #include <xen/xen.h>
  #include <xen/page.h>
  #include <xen/interface/xen.h>
 +#include <xen/interface/hvm/hvm_op.h>
  #include <xen/interface/version.h>
+ #include <xen/interface/memory.h>
  #include <xen/hvc-console.h>
  
  #include "multicalls.h"
@@@ -1940,42 -2024,206 +2025,240 @@@ void __init xen_init_mmu_ops(void)
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;
+       vmap_lazy_unmap = false;
+ }
+
+ /* Protected by xen_reservation_lock. */
+ #define MAX_CONTIG_ORDER 9 /* 2MB */
+ static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+ #define VOID_PTE (mfn_pte(0, __pgprot(0)))
+
+ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+                               unsigned long *in_frames,
+                               unsigned long *out_frames)
+ {
+       int i;
+       struct multicall_space mcs;
+
+       xen_mc_batch();
+       for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
+               mcs = __xen_mc_entry(0);
+               if (in_frames)
+                       in_frames[i] = virt_to_mfn(vaddr);
+               MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+               set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+               if (out_frames)
+                       out_frames[i] = virt_to_pfn(vaddr);
+       }
+       xen_mc_issue(0);
+ }
+
+ /*
+  * Update the pfn-to-mfn mappings for a virtual address range, either to
+  * point to an array of mfns, or contiguously from a single starting
+  * mfn.
+  */
+ static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
+                                    unsigned long *mfns,
+                                    unsigned long first_mfn)
+ {
+       unsigned i, limit;
+       unsigned long mfn;
+
+       xen_mc_batch();
+       limit = 1u << order;
+       for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
+               struct multicall_space mcs;
+               unsigned flags;
+
+               mcs = __xen_mc_entry(0);
+               if (mfns)
+                       mfn = mfns[i];
+               else
+                       mfn = first_mfn + i;
+               if (i < (limit - 1))
+                       flags = 0;
+               else {
+                       if (order == 0)
+                               flags = UVMF_INVLPG | UVMF_ALL;
+                       else
+                               flags = UVMF_TLB_FLUSH | UVMF_ALL;
+               }
+               MULTI_update_va_mapping(mcs.mc, vaddr,
+                               mfn_pte(mfn, PAGE_KERNEL), flags);
+               set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+       }
+       xen_mc_issue(0);
+ }
+
+ /*
+  * Perform the hypercall to exchange a region of our pfns to point to
+  * memory with the required contiguous alignment.  Takes the pfns as
+  * input, and populates mfns as output.
+  *
+  * Returns a success code indicating whether the hypervisor was able to
+  * satisfy the request or not.
+  */
+ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
+                              unsigned long *pfns_in,
+                              unsigned long extents_out,
+                              unsigned int order_out,
+                              unsigned long *mfns_out,
+                              unsigned int address_bits)
+ {
+       long rc;
+       int success;
+       struct xen_memory_exchange exchange = {
+               .in = {
+                       .nr_extents   = extents_in,
+                       .extent_order = order_in,
+                       .extent_start = pfns_in,
+                       .domid        = DOMID_SELF
+               },
+               .out = {
+                       .nr_extents   = extents_out,
+                       .extent_order = order_out,
+                       .extent_start = mfns_out,
+                       .address_bits = address_bits,
+                       .domid        = DOMID_SELF
+               }
+       };
+
+       BUG_ON(extents_in << order_in != extents_out << order_out);
+
+       rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+       success = (exchange.nr_exchanged == extents_in);
+
+       BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+       BUG_ON(success && (rc != 0));
+
+       return success;
+ }
+
+ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+                                unsigned int address_bits)
+ {
+       unsigned long *in_frames = discontig_frames, out_frame;
+       unsigned long  flags;
+       int            success;
+
+       /*
+        * Currently an auto-translated guest will not perform I/O, nor will
+        * it require PAE page directories below 4GB. Therefore any calls to
+        * this function are redundant and can be ignored.
+        */
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return -ENOMEM;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Zap current PTEs, remembering MFNs. */
+       xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+       /* 2. Get a new contiguous memory extent. */
+       out_frame = virt_to_pfn(vstart);
+       success = xen_exchange_memory(1UL << order, 0, in_frames,
+                                     1, order, &out_frame,
+                                     address_bits);
+
+       /* 3. Map the new extent in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+       else
+               xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+       return success ? 0 : -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+ {
+       unsigned long *out_frames = discontig_frames, in_frame;
+       unsigned long  flags;
+       int success;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return;
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Find start MFN of contiguous extent. */
+       in_frame = virt_to_mfn(vstart);
+
+       /* 2. Zap current PTEs. */
+       xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+       /* 3. Do the exchange for non-contiguous MFNs. */
+       success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+                                       0, out_frames, 0);
+
+       /* 4. Map new pages in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+       else
+               xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
  }
+ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
  
 +#ifdef CONFIG_XEN_PVHVM
 +static void xen_hvm_exit_mmap(struct mm_struct *mm)
 +{
 +      struct xen_hvm_pagetable_dying a;
 +      int rc;
 +
 +      a.domid = DOMID_SELF;
 +      a.gpa = __pa(mm->pgd);
 +      rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
 +      WARN_ON_ONCE(rc < 0);
 +}
 +
 +static int is_pagetable_dying_supported(void)
 +{
 +      struct xen_hvm_pagetable_dying a;
 +      int rc = 0;
 +
 +      a.domid = DOMID_SELF;
 +      a.gpa = 0x00;
 +      rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
 +      if (rc < 0) {
 +              printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
 +              return 0;
 +      }
 +      return 1;
 +}
 +
 +void __init xen_hvm_init_mmu_ops(void)
 +{
 +      if (is_pagetable_dying_supported())
 +              pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
 +}
 +#endif
 +
  #ifdef CONFIG_XEN_DEBUG_FS
  
  static struct dentry *d_mmu_debug;
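
The exchanges above hinge on one invariant, enforced by the first BUG_ON()
in xen_exchange_memory(): both sides of the hypercall must cover the same
number of machine pages, i.e. extents_in << order_in == extents_out << order_out.
A minimal standalone sketch of that arithmetic for the two call shapes used
by xen_create_contiguous_region() and xen_destroy_contiguous_region() (plain
userspace C; every name and value here is illustrative, not kernel API):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int order = 9;			/* a 2MB region of 4KB pages */

	/* create: 2^order order-0 extents in, one order-9 extent out */
	unsigned long create_in  = 1UL << order;
	unsigned long create_out = 1UL;
	assert((create_in << 0) == (create_out << order));

	/* destroy: one order-9 extent in, 2^order order-0 extents out */
	unsigned long destroy_in  = 1UL;
	unsigned long destroy_out = 1UL << order;
	assert((destroy_in << order) == (destroy_out << 0));

	printf("each exchange covers %lu machine pages\n", create_in);
	return 0;
}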
diff --cc drivers/xen/Kconfig
index 0a882693663997634a553f76271f9cb287ea8fb2,97199c2a64a076ee47622c0f12987c4587c6a26c..60d71e9abe9fa369ce4a9deb99f8050ff6e855b3
@@@ -62,13 -62,8 +62,18 @@@ config XEN_SYS_HYPERVISOR
         virtual environment, /sys/hypervisor will still be present,
         but will have no xen contents.
  
 +config XEN_PLATFORM_PCI
 +      tristate "xen platform pci device driver"
 +      depends on XEN_PVHVM
 +      default m
 +      help
 +        Driver for the Xen PCI Platform device: it is responsible for
 +        initializing xenbus and grant_table when running in a Xen HVM
 +        domain. As a consequence this driver is required to run any Xen PV
 +        frontend on Xen HVM.
++
+ config SWIOTLB_XEN
+       def_bool y
+       depends on SWIOTLB
  endmenu
diff --cc drivers/xen/Makefile
index e392fb776af365823e5b8985245dc5613c303a4c,85f84cff810469628702f6ed56563dd1b37ac1bf..fcaf838f54be26b6c37cf48ec174922ffda3cb1d
@@@ -10,4 -10,4 +10,5 @@@ obj-$(CONFIG_XEN_BALLOON)     += balloon.o
  obj-$(CONFIG_XEN_DEV_EVTCHN)  += evtchn.o
  obj-$(CONFIG_XENFS)           += xenfs/
  obj-$(CONFIG_XEN_SYS_HYPERVISOR)      += sys-hypervisor.o
 +obj-$(CONFIG_XEN_PLATFORM_PCI)        += platform-pci.o
+ obj-$(CONFIG_SWIOTLB_XEN)     += swiotlb-xen.o
diff --cc include/linux/vmalloc.h
Simple merge
diff --cc include/xen/xen-ops.h
index 46bc81ef74c67468a667e6f5d06026be1cc43f4c,d789c937c48aac9dc11e722201e7a573040eb8c3..351f4051f6d856d455d5f3d60b75bf3570209d9a
@@@ -15,6 -14,10 +15,12 @@@ void xen_mm_unpin_all(void)
  void xen_timer_resume(void);
  void xen_arch_resume(void);
  
 +int xen_setup_shutdown_event(void);
 +
+ extern unsigned long *xen_contiguous_bitmap;
+ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+                               unsigned int address_bits);
+ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
  #endif /* INCLUDE_XEN_OPS_H */
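
The two exported calls above are the hooks the new swiotlb-xen library uses
to make its bounce buffer machine-contiguous and DMA-addressable. A minimal
sketch of such a caller, assuming 2MB chunks and a 32-bit address limit
(the helper name, chunk size, and loop are illustrative assumptions, not
the merged swiotlb-xen code):

#include <xen/xen-ops.h>

#define CHUNK_ORDER 9	/* 2MB chunks; matches MAX_CONTIG_ORDER in mmu.c */

/* Hypothetical helper: ask Xen to back each chunk of buf with machine
 * memory that is contiguous and addressable below 4GB. */
static int make_buffer_dma_ready(void *buf, unsigned long nr_chunks)
{
	unsigned long i;
	int rc;

	for (i = 0; i < nr_chunks; i++) {
		unsigned long vstart = (unsigned long)buf +
				       (i << (PAGE_SHIFT + CHUNK_ORDER));

		rc = xen_create_contiguous_region(vstart, CHUNK_ORDER, 32);
		if (rc)
			return rc;	/* hypervisor could not satisfy us */
	}
	return 0;
}

Note that on failure xen_create_contiguous_region() remaps the original
frames before returning -ENOMEM, so the region stays usable, just not
machine-contiguous.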
diff --cc mm/vmalloc.c
Simple merge