Merge branch 'stable/cleanups-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 7 Nov 2011 04:13:34 +0000 (20:13 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 7 Nov 2011 04:13:34 +0000 (20:13 -0800)
* 'stable/cleanups-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: use static initializers in xen-balloon.c
  Xen: fix braces and tabs coding style issue in xenbus_probe.c
  Xen: fix braces coding style issue in xenbus_probe.h
  Xen: fix whitespaces,tabs coding style issue in drivers/xen/pci.c
  Xen: fix braces coding style issue in gntdev.c and grant-table.c
  Xen: fix whitespaces,tabs coding style issue in drivers/xen/events.c
  Xen: fix whitespaces,tabs coding style issue in drivers/xen/balloon.c

Fix up trivial whitespace-conflicts in
 drivers/xen/{balloon.c,pci.c,xenbus/xenbus_probe.c}

drivers/xen/balloon.c
drivers/xen/events.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/pci.c
drivers/xen/xenbus/xenbus_probe.c

diff --combined drivers/xen/balloon.c
index 1779338e1d865d313947cbfbf200092f7a3e505e,61c0ee7aa7dd9623df1ff48f225a66e0b4e349e8..a767884a6c7a10cfb526aff86de7b0ca1848eb9f
@@@ -4,12 -4,6 +4,12 @@@
   * Copyright (c) 2003, B Dragovic
   * Copyright (c) 2003-2004, M Williamson, K Fraser
   * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 + * Copyright (c) 2010 Daniel Kiper
 + *
 + * Memory hotplug support was written by Daniel Kiper. Work on
 + * it was sponsored by Google under Google Summer of Code 2010
 + * program. Jeremy Fitzhardinge from Citrix was the mentor for
 + * this project.
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License version 2
@@@ -39,7 -33,6 +39,7 @@@
  #include <linux/kernel.h>
  #include <linux/sched.h>
  #include <linux/errno.h>
 +#include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/bootmem.h>
  #include <linux/pagemap.h>
@@@ -47,9 -40,6 +47,9 @@@
  #include <linux/mutex.h>
  #include <linux/list.h>
  #include <linux/gfp.h>
 +#include <linux/notifier.h>
 +#include <linux/memory.h>
 +#include <linux/memory_hotplug.h>
  
  #include <asm/page.h>
  #include <asm/pgalloc.h>
@@@ -94,8 -84,8 +94,8 @@@ static unsigned long frame_list[PAGE_SI
  #define inc_totalhigh_pages() (totalhigh_pages++)
  #define dec_totalhigh_pages() (totalhigh_pages--)
  #else
- #define inc_totalhigh_pages() do {} while(0)
- #define dec_totalhigh_pages() do {} while(0)
+ #define inc_totalhigh_pages() do {} while (0)
+ #define dec_totalhigh_pages() do {} while (0)
  #endif
  
  /* List of ballooned pages, threaded through the mem_map array. */
@@@ -155,8 -145,7 +155,7 @@@ static struct page *balloon_retrieve(bo
        if (PageHighMem(page)) {
                balloon_stats.balloon_high--;
                inc_totalhigh_pages();
-       }
-       else
+       } else
                balloon_stats.balloon_low--;
  
        totalram_pages++;
@@@ -204,87 -193,6 +203,87 @@@ static enum bp_state update_schedule(en
        return BP_EAGAIN;
  }
  
 +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 +static long current_credit(void)
 +{
 +      return balloon_stats.target_pages - balloon_stats.current_pages -
 +              balloon_stats.hotplug_pages;
 +}
 +
 +static bool balloon_is_inflated(void)
 +{
 +      if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
 +                      balloon_stats.balloon_hotplug)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +/*
 + * reserve_additional_memory() adds memory region of size >= credit above
 + * max_pfn. New region is section aligned and size is modified to be multiple
 + * of section size. Those features allow optimal use of address space and
 + * establish proper alignment when this function is called first time after
 + * boot (last section not fully populated at boot time contains unused memory
 + * pages with PG_reserved bit not set; online_pages_range() does not allow page
 + * onlining in whole range if first onlined page does not have PG_reserved
 + * bit set). Real size of added memory is established at page onlining stage.
 + */
 +
 +static enum bp_state reserve_additional_memory(long credit)
 +{
 +      int nid, rc;
 +      u64 hotplug_start_paddr;
 +      unsigned long balloon_hotplug = credit;
 +
 +      hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
 +      balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
 +      nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
 +
 +      rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
 +
 +      if (rc) {
 +              pr_info("xen_balloon: %s: add_memory() failed: %i\n", __func__, rc);
 +              return BP_EAGAIN;
 +      }
 +
 +      balloon_hotplug -= credit;
 +
 +      balloon_stats.hotplug_pages += credit;
 +      balloon_stats.balloon_hotplug = balloon_hotplug;
 +
 +      return BP_DONE;
 +}
 +
 +static void xen_online_page(struct page *page)
 +{
 +      __online_page_set_limits(page);
 +
 +      mutex_lock(&balloon_mutex);
 +
 +      __balloon_append(page);
 +
 +      if (balloon_stats.hotplug_pages)
 +              --balloon_stats.hotplug_pages;
 +      else
 +              --balloon_stats.balloon_hotplug;
 +
 +      mutex_unlock(&balloon_mutex);
 +}
 +
 +static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 +{
 +      if (val == MEM_ONLINE)
 +              schedule_delayed_work(&balloon_worker, 0);
 +
 +      return NOTIFY_OK;
 +}
 +
 +static struct notifier_block xen_memory_nb = {
 +      .notifier_call = xen_memory_notifier,
 +      .priority = 0
 +};
 +#else
  static long current_credit(void)
  {
        unsigned long target = balloon_stats.target_pages;
        return target - balloon_stats.current_pages;
  }
  
 +static bool balloon_is_inflated(void)
 +{
 +      if (balloon_stats.balloon_low || balloon_stats.balloon_high)
 +              return true;
 +      else
 +              return false;
 +}
 +
 +static enum bp_state reserve_additional_memory(long credit)
 +{
 +      balloon_stats.target_pages = balloon_stats.current_pages;
 +      return BP_DONE;
 +}
 +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
 +
  static enum bp_state increase_reservation(unsigned long nr_pages)
  {
        int rc;
                .domid        = DOMID_SELF
        };
  
 +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 +      if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
 +              nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
 +              balloon_stats.hotplug_pages += nr_pages;
 +              balloon_stats.balloon_hotplug -= nr_pages;
 +              return BP_DONE;
 +      }
 +#endif
 +
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
  
@@@ -394,15 -278,6 +393,15 @@@ static enum bp_state decrease_reservati
                .domid        = DOMID_SELF
        };
  
 +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 +      if (balloon_stats.hotplug_pages) {
 +              nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
 +              balloon_stats.hotplug_pages -= nr_pages;
 +              balloon_stats.balloon_hotplug += nr_pages;
 +              return BP_DONE;
 +      }
 +#endif
 +
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
  
                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                __pte_ma(0), 0);
                        BUG_ON(ret);
-                 }
+               }
  
        }
  
@@@ -464,12 -339,8 +463,12 @@@ static void balloon_process(struct work
        do {
                credit = current_credit();
  
 -              if (credit > 0)
 -                      state = increase_reservation(credit);
 +              if (credit > 0) {
 +                      if (balloon_is_inflated())
 +                              state = increase_reservation(credit);
 +                      else
 +                              state = reserve_additional_memory(credit);
 +              }
  
                if (credit < 0)
                        state = decrease_reservation(-credit, GFP_BALLOON);
@@@ -502,24 -373,20 +501,24 @@@ EXPORT_SYMBOL_GPL(balloon_set_new_targe
   * alloc_xenballooned_pages - get pages that have been ballooned out
   * @nr_pages: Number of pages to get
   * @pages: pages returned
 + * @highmem: highmem or lowmem pages
   * @return 0 on success, error otherwise
   */
 -int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 +int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
  {
        int pgno = 0;
-       struct page* page;
+       struct page *page;
        mutex_lock(&balloon_mutex);
        while (pgno < nr_pages) {
 -              page = balloon_retrieve(true);
 -              if (page) {
 +              page = balloon_retrieve(highmem);
 +              if (page && PageHighMem(page) == highmem) {
                        pages[pgno++] = page;
                } else {
                        enum bp_state st;
 -                      st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
 +                      if (page)
 +                              balloon_append(page);
 +                      st = decrease_reservation(nr_pages - pgno,
 +                                      highmem ? GFP_HIGHUSER : GFP_USER);
                        if (st != BP_DONE)
                                goto out_undo;
                }
@@@ -541,7 -408,7 +540,7 @@@ EXPORT_SYMBOL(alloc_xenballooned_pages)
   * @nr_pages: Number of pages
   * @pages: pages to return
   */
- void free_xenballooned_pages(int nr_pages, struct page** pages)
+ void free_xenballooned_pages(int nr_pages, struct page **pages)
  {
        int i;
  
  }
  EXPORT_SYMBOL(free_xenballooned_pages);
  
 -static int __init balloon_init(void)
 +static void __init balloon_add_region(unsigned long start_pfn,
 +                                    unsigned long pages)
  {
        unsigned long pfn, extra_pfn_end;
        struct page *page;
  
 +      /*
 +       * If the amount of usable memory has been limited (e.g., with
 +       * the 'mem' command line parameter), don't add pages beyond
 +       * this limit.
 +       */
 +      extra_pfn_end = min(max_pfn, start_pfn + pages);
 +
 +      for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
 +              page = pfn_to_page(pfn);
 +              /* totalram_pages and totalhigh_pages do not
 +                 include the boot-time balloon extension, so
 +                 don't subtract from it. */
 +              __balloon_append(page);
 +      }
 +}
 +
 +static int __init balloon_init(void)
 +{
 +      int i;
 +
        if (!xen_domain())
                return -ENODEV;
  
        pr_info("xen/balloon: Initialising balloon driver.\n");
  
 -      balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
 +      balloon_stats.current_pages = xen_pv_domain()
 +              ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
 +              : max_pfn;
        balloon_stats.target_pages  = balloon_stats.current_pages;
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = RETRY_UNLIMITED;
  
 +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 +      balloon_stats.hotplug_pages = 0;
 +      balloon_stats.balloon_hotplug = 0;
 +
 +      set_online_page_callback(&xen_online_page);
 +      register_memory_notifier(&xen_memory_nb);
 +#endif
 +
        /*
 -       * Initialise the balloon with excess memory space.  We need
 -       * to make sure we don't add memory which doesn't exist or
 -       * logically exist.  The E820 map can be trimmed to be smaller
 -       * than the amount of physical memory due to the mem= command
 -       * line parameter.  And if this is a 32-bit non-HIGHMEM kernel
 -       * on a system with memory which requires highmem to access,
 -       * don't try to use it.
 +       * Initialize the balloon with pages from the extra memory
 +       * regions (see arch/x86/xen/setup.c).
         */
 -      extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
 -                          (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
 -      for (pfn = PFN_UP(xen_extra_mem_start);
 -           pfn < extra_pfn_end;
 -           pfn++) {
 -              page = pfn_to_page(pfn);
 -              /* totalram_pages and totalhigh_pages do not include the boot-time
 -                 balloon extension, so don't subtract from it. */
 -              __balloon_append(page);
 -      }
 +      for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
 +              if (xen_extra_mem[i].size)
 +                      balloon_add_region(PFN_UP(xen_extra_mem[i].start),
 +                                         PFN_DOWN(xen_extra_mem[i].size));
  
        return 0;
  }
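
The balloon.c hunks above restructure balloon_process() around a signed page "credit" (target_pages minus current_pages): positive credit re-populates previously ballooned-out pages when any exist, and otherwise takes the new memory-hotplug path; negative credit balloons pages out. Below is a minimal userspace C sketch of that decision loop; struct stats and the page counts are hypothetical stand-ins for balloon_stats, not the kernel code itself.

/*
 * Userspace sketch of the balloon_process() decision loop in the hunk
 * above.  struct stats and the numbers are hypothetical stand-ins for
 * balloon_stats; only the control flow mirrors the patch.
 */
#include <stdio.h>

struct stats {
	long current_pages;
	long target_pages;
	long balloon_low;	/* ballooned-out lowmem pages */
	long balloon_high;	/* ballooned-out highmem pages */
};

static int balloon_is_inflated(const struct stats *s)
{
	return s->balloon_low || s->balloon_high;
}

static void balloon_step(struct stats *s)
{
	long credit = s->target_pages - s->current_pages;

	if (credit > 0) {
		if (balloon_is_inflated(s))
			printf("increase_reservation(%ld)\n", credit);	/* re-populate ballooned pages */
		else
			printf("reserve_additional_memory(%ld)\n", credit);	/* hotplug a new region */
	} else if (credit < 0) {
		printf("decrease_reservation(%ld)\n", -credit);	/* balloon pages out */
	}
}

int main(void)
{
	struct stats s = { .current_pages = 1000, .target_pages = 1200 };

	balloon_step(&s);	/* nothing ballooned yet -> hotplug path */
	s.balloon_low = 50;
	balloon_step(&s);	/* ballooned pages exist -> re-populate first */
	return 0;
}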
diff --combined drivers/xen/events.c
index 0eb8a57cc808e0d789c6f1477ba9d699297db377,8876ffd08771baf0790db3643122e16397c93b8b..6e075cdd0c6bf56ff8daacf986d443de23c49998
@@@ -54,7 -54,7 +54,7 @@@
   * This lock protects updates to the following mapping and reference-count
   * arrays. The lock does not need to be acquired to read the mapping tables.
   */
 -static DEFINE_SPINLOCK(irq_mapping_update_lock);
 +static DEFINE_MUTEX(irq_mapping_update_lock);
  
  static LIST_HEAD(xen_irq_list_head);
  
@@@ -85,8 -85,7 +85,7 @@@ enum xen_irq_type 
   *    IPI - IPI vector
   *    EVTCHN -
   */
- struct irq_info
- {
+ struct irq_info {
        struct list_head list;
        enum xen_irq_type type; /* type */
        unsigned irq;
@@@ -282,9 -281,9 +281,9 @@@ static inline unsigned long active_evtc
                                           struct shared_info *sh,
                                           unsigned int idx)
  {
-       return (sh->evtchn_pending[idx] &
+       return sh->evtchn_pending[idx] &
                per_cpu(cpu_evtchn_mask, cpu)[idx] &
-               ~sh->evtchn_mask[idx]);
+               ~sh->evtchn_mask[idx];
  }
  
  static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
@@@ -432,8 -431,7 +431,8 @@@ static int __must_check xen_allocate_ir
  
        irq = irq_alloc_desc_from(first, -1);
  
 -      xen_irq_init(irq);
 +      if (irq >= 0)
 +              xen_irq_init(irq);
  
        return irq;
  }
@@@ -632,7 -630,7 +631,7 @@@ int xen_bind_pirq_gsi_to_irq(unsigned g
        int irq = -1;
        struct physdev_irq irq_op;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                                handle_edge_irq, name);
  
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  
        return irq;
  }
@@@ -711,10 -709,10 +710,10 @@@ int xen_bind_pirq_msi_to_irq(struct pci
  {
        int irq, ret;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        irq = xen_allocate_irq_dynamic();
 -      if (irq == -1)
 +      if (irq < 0)
                goto out;
  
        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
        if (ret < 0)
                goto error_irq;
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
        return irq;
  error_irq:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
 -      return -1;
 +      return ret;
  }
  #endif
  
@@@ -741,7 -739,7 +740,7 @@@ int xen_destroy_irq(int irq
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        desc = irq_to_desc(irq);
        if (!desc)
        xen_free_irq(irq);
  
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
        return rc;
  }
  
@@@ -777,10 -775,10 +776,10 @@@ int xen_irq_from_pirq(unsigned pirq
  
        struct irq_info *info;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        list_for_each_entry(info, &xen_irq_list_head, list) {
 -              if (info == NULL || info->type != IRQT_PIRQ)
 +              if (info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
        }
        irq = -1;
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  
        return irq;
  }
@@@ -803,7 -801,7 +802,7 @@@ int bind_evtchn_to_irq(unsigned int evt
  {
        int irq;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        irq = evtchn_to_irq[evtchn];
  
        }
  
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  
        return irq;
  }
@@@ -830,7 -828,7 +829,7 @@@ static int bind_ipi_to_irq(unsigned in
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
  
        }
  
   out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
        return irq;
  }
  
@@@ -873,34 -871,13 +872,34 @@@ static int bind_interdomain_evtchn_to_i
        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
  }
  
 +static int find_virq(unsigned int virq, unsigned int cpu)
 +{
 +      struct evtchn_status status;
 +      int port, rc = -ENOENT;
 +
 +      memset(&status, 0, sizeof(status));
 +      for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
 +              status.dom = DOMID_SELF;
 +              status.port = port;
 +              rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
 +              if (rc < 0)
 +                      continue;
 +              if (status.status != EVTCHNSTAT_virq)
 +                      continue;
 +              if (status.u.virq == virq && status.vcpu == cpu) {
 +                      rc = port;
 +                      break;
 +              }
 +      }
 +      return rc;
 +}
  
  int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
  {
        struct evtchn_bind_virq bind_virq;
 -      int evtchn, irq;
 +      int evtchn, irq, ret;
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        irq = per_cpu(virq_to_irq, cpu)[virq];
  
  
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
 -              if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
 -                                              &bind_virq) != 0)
 -                      BUG();
 -              evtchn = bind_virq.port;
 +              ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
 +                                              &bind_virq);
 +              if (ret == 0)
 +                      evtchn = bind_virq.port;
 +              else {
 +                      if (ret == -EEXIST)
 +                              ret = find_virq(virq, cpu);
 +                      BUG_ON(ret < 0);
 +                      evtchn = ret;
 +              }
  
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
  
        }
  
  out:
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  
        return irq;
  }
@@@ -941,7 -912,7 +940,7 @@@ static void unbind_from_irq(unsigned in
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
  
        xen_free_irq(irq);
  
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  }
  
  int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@@ -1049,7 -1020,7 +1048,7 @@@ int bind_ipi_to_irqhandler(enum ipi_vec
        if (irq < 0)
                return irq;
  
 -      irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
 +      irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
@@@ -1180,7 -1151,7 +1179,7 @@@ static void __xen_evtchn_do_upcall(void
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
-       unsigned count;
+       unsigned count;
  
        do {
                unsigned long pending_words;
@@@ -1307,7 -1278,7 +1306,7 @@@ void rebind_evtchn_irq(int evtchn, int 
           will also be masked. */
        disable_irq(irq);
  
 -      spin_lock(&irq_mapping_update_lock);
 +      mutex_lock(&irq_mapping_update_lock);
  
        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
  
        xen_irq_info_evtchn_init(irq, evtchn);
  
 -      spin_unlock(&irq_mapping_update_lock);
 +      mutex_unlock(&irq_mapping_update_lock);
  
        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));
@@@ -1698,7 -1669,6 +1697,7 @@@ void __init xen_init_IRQ(void
  
        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                    GFP_KERNEL);
 +      BUG_ON(!evtchn_to_irq);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;
  
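The events.c changes convert irq_mapping_update_lock from a spinlock to a mutex (paths holding it can now sleep), and make bind_virq_to_irq() recover when EVTCHNOP_bind_virq returns -EEXIST by scanning for the already-bound port via find_virq() instead of calling BUG(). A hedged userspace sketch of that recovery pattern follows; the hypercalls are modeled by hypothetical stub functions over a small port table.

/*
 * Sketch of the -EEXIST recovery added above.  bind_virq_op() and
 * find_virq() stand in for EVTCHNOP_bind_virq and the per-port
 * EVTCHNOP_status scan; the table models hypervisor state.
 */
#include <assert.h>
#include <stdio.h>

#define NR_PORTS 8

/* hypothetical state: port 3 already carries VIRQ 5 */
static int port_virq[NR_PORTS] = { -1, -1, -1, 5, -1, -1, -1, -1 };

static int bind_virq_op(int virq)
{
	for (int p = 0; p < NR_PORTS; p++)
		if (port_virq[p] == virq)
			return -17;	/* -EEXIST: already bound */
	port_virq[1] = virq;
	return 1;		/* newly allocated port */
}

static int find_virq(int virq)
{
	for (int p = 0; p < NR_PORTS; p++)
		if (port_virq[p] == virq)
			return p;
	return -2;		/* -ENOENT */
}

int main(void)
{
	int ret = bind_virq_op(5);

	if (ret == -17)		/* recover instead of BUG() */
		ret = find_virq(5);
	assert(ret >= 0);
	printf("VIRQ 5 bound to port %d\n", ret);
	return 0;
}
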
diff --combined drivers/xen/gntdev.c
index 880798aae2f2ae868bc9cc3206517901c1560afa,772a5b8bbf2e8b16a2ca73ce204d3a403261f775..39871326afa2ebb5905d2f6afbc922bb08826d0a
@@@ -83,7 -83,6 +83,7 @@@ struct grant_map 
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref   *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
 +      struct gnttab_map_grant_ref   *kmap_ops;
        struct page **pages;
  };
  
@@@ -117,22 -116,19 +117,22 @@@ static struct grant_map *gntdev_alloc_m
        add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
        add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
        add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
 +      add->kmap_ops  = kzalloc(sizeof(add->kmap_ops[0])  * count, GFP_KERNEL);
        add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
 +          NULL == add->kmap_ops  ||
            NULL == add->pages)
                goto err;
  
 -      if (alloc_xenballooned_pages(count, add->pages))
 +      if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
                goto err;
  
        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
 +              add->kmap_ops[i].handle = -1;
        }
  
        add->index = 0;
@@@ -146,7 -142,6 +146,7 @@@ err
        kfree(add->grants);
        kfree(add->map_ops);
        kfree(add->unmap_ops);
 +      kfree(add->kmap_ops);
        kfree(add);
        return NULL;
  }
@@@ -193,9 -188,8 +193,8 @@@ static void gntdev_put_map(struct grant
  
        atomic_sub(map->count, &pages_mapped);
  
-       if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+       if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
                notify_remote_via_evtchn(map->notify.event);
-       }
  
        if (map->pages) {
                if (!use_ptemod)
@@@ -248,35 -242,10 +247,35 @@@ static int map_grant_pages(struct grant
                        gnttab_set_unmap_op(&map->unmap_ops[i], addr,
                                map->flags, -1 /* handle */);
                }
 +      } else {
 +              /*
 +               * Setup the map_ops corresponding to the pte entries pointing
 +               * to the kernel linear addresses of the struct pages.
 +               * These ptes are completely different from the user ptes dealt
 +               * with find_grant_ptes.
 +               */
 +              for (i = 0; i < map->count; i++) {
 +                      unsigned level;
 +                      unsigned long address = (unsigned long)
 +                              pfn_to_kaddr(page_to_pfn(map->pages[i]));
 +                      pte_t *ptep;
 +                      u64 pte_maddr = 0;
 +                      BUG_ON(PageHighMem(map->pages[i]));
 +
 +                      ptep = lookup_address(address, &level);
 +                      pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
 +                      gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
 +                              map->flags |
 +                              GNTMAP_host_map |
 +                              GNTMAP_contains_pte,
 +                              map->grants[i].ref,
 +                              map->grants[i].domid);
 +              }
        }
  
        pr_debug("map %d+%d\n", map->index, map->count);
 -      err = gnttab_map_refs(map->map_ops, map->pages, map->count);
 +      err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
 +                      map->pages, map->count);
        if (err)
                return err;
  
@@@ -492,11 -461,13 +491,11 @@@ static int gntdev_release(struct inode 
  
        pr_debug("priv %p\n", priv);
  
 -      spin_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(map);
        }
 -      spin_unlock(&priv->lock);
  
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
@@@ -560,11 -531,10 +559,11 @@@ static long gntdev_ioctl_unmap_grant_re
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
 -              gntdev_put_map(map);
                err = 0;
        }
        spin_unlock(&priv->lock);
 +      if (map)
 +              gntdev_put_map(map);
        return err;
  }
  
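In gntdev_ioctl_unmap_grant_ref() above, gntdev_put_map() is moved out from under priv->lock: the map is unlinked from the list while the spinlock is held, but the teardown (which may sleep) runs only after the unlock. A small userspace sketch of that unlink-then-release pattern, using a pthread mutex and a hypothetical singly linked map list:

/*
 * Sketch of the unlink-then-free pattern from the unmap hunk above:
 * detach the object under the lock, defer the potentially-sleeping
 * teardown until after unlock.  The list and teardown are simplified.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct map {
	struct map *next;
	int index;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct map *maps;

static void put_map(struct map *m)	/* may sleep in the kernel version */
{
	printf("tearing down map %d\n", m->index);
	free(m);
}

static int unmap(int index)
{
	struct map *m, **pp;
	int err = -2;			/* -ENOENT */

	pthread_mutex_lock(&lock);
	for (pp = &maps; (m = *pp) != NULL; pp = &m->next)
		if (m->index == index) {
			*pp = m->next;	/* unlink under the lock */
			err = 0;
			break;
		}
	pthread_mutex_unlock(&lock);

	if (m)
		put_map(m);		/* release outside the lock */
	return err;
}

int main(void)
{
	struct map *m = malloc(sizeof(*m));

	m->index = 7;
	m->next = NULL;
	maps = m;
	return unmap(7);
}
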
diff --combined drivers/xen/grant-table.c
index 8c71ab80175653827d0ceb966b74b7aef73287d3,3a3dceb7063ac76fa689044118c2359d70db4023..bf1c094f4ebf12ea234b9a60fdaea118d6217d8e
@@@ -82,7 -82,7 +82,7 @@@ static inline grant_ref_t *__gnttab_ent
  static int get_free_entries(unsigned count)
  {
        unsigned long flags;
 -      int ref, rc;
 +      int ref, rc = 0;
        grant_ref_t head;
  
        spin_lock_irqsave(&gnttab_list_lock, flags);
@@@ -193,7 -193,7 +193,7 @@@ int gnttab_query_foreign_access(grant_r
  
        nflags = shared[ref].flags;
  
-       return (nflags & (GTF_reading|GTF_writing));
+       return nflags & (GTF_reading|GTF_writing);
  }
  EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
  
@@@ -448,8 -448,7 +448,8 @@@ unsigned int gnttab_max_grant_frames(vo
  EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
  
  int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 -                  struct page **pages, unsigned int count)
 +                      struct gnttab_map_grant_ref *kmap_ops,
 +                      struct page **pages, unsigned int count)
  {
        int i, ret;
        pte_t *pte;
                         */
                        return -EOPNOTSUPP;
                }
 -              ret = m2p_add_override(mfn, pages[i],
 -                                     map_ops[i].flags & GNTMAP_contains_pte);
 +              ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
                if (ret)
                        return ret;
        }
diff --combined drivers/xen/pci.c
index 66057075d6e2664c4411b5168c17ba3ac1828938,c4448ee5595f92ae582168f8c064f42e608d89b4..b84bf0b6cc34c4fd34bfd35015bd8057f0ddf6b9
@@@ -18,7 -18,6 +18,7 @@@
   */
  
  #include <linux/pci.h>
 +#include <linux/acpi.h>
  #include <xen/xen.h>
  #include <xen/interface/physdev.h>
  #include <xen/interface/xen.h>
  #include <asm/xen/hypercall.h>
  #include "../pci/pci.h"
  
 +static bool __read_mostly pci_seg_supported = true;
 +
  static int xen_add_device(struct device *dev)
  {
        int r;
        struct pci_dev *pci_dev = to_pci_dev(dev);
 +#ifdef CONFIG_PCI_IOV
 +      struct pci_dev *physfn = pci_dev->physfn;
 +#endif
 +
 +      if (pci_seg_supported) {
 +              struct physdev_pci_device_add add = {
 +                      .seg = pci_domain_nr(pci_dev->bus),
 +                      .bus = pci_dev->bus->number,
 +                      .devfn = pci_dev->devfn
 +              };
 +#ifdef CONFIG_ACPI
 +              acpi_handle handle;
 +#endif
 +
 +#ifdef CONFIG_PCI_IOV
 +              if (pci_dev->is_virtfn) {
 +                      add.flags = XEN_PCI_DEV_VIRTFN;
 +                      add.physfn.bus = physfn->bus->number;
 +                      add.physfn.devfn = physfn->devfn;
 +              } else
 +#endif
 +              if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
 +                      add.flags = XEN_PCI_DEV_EXTFN;
 +
 +#ifdef CONFIG_ACPI
 +              handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
 +              if (!handle)
 +                      handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
 +#ifdef CONFIG_PCI_IOV
 +              if (!handle && pci_dev->is_virtfn)
 +                      handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
 +#endif
 +              if (handle) {
 +                      acpi_status status;
 +
 +                      do {
 +                              unsigned long long pxm;
 +
 +                              status = acpi_evaluate_integer(handle, "_PXM",
 +                                                             NULL, &pxm);
 +                              if (ACPI_SUCCESS(status)) {
 +                                      add.optarr[0] = pxm;
 +                                      add.flags |= XEN_PCI_DEV_PXM;
 +                                      break;
 +                              }
 +                              status = acpi_get_parent(handle, &handle);
 +                      } while (ACPI_SUCCESS(status));
 +              }
 +#endif /* CONFIG_ACPI */
 +
 +              r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
 +              if (r != -ENOSYS)
 +                      return r;
 +              pci_seg_supported = false;
 +      }
  
 +      if (pci_domain_nr(pci_dev->bus))
 +              r = -ENOSYS;
  #ifdef CONFIG_PCI_IOV
 -      if (pci_dev->is_virtfn) {
 +      else if (pci_dev->is_virtfn) {
                struct physdev_manage_pci_ext manage_pci_ext = {
                        .bus            = pci_dev->bus->number,
                        .devfn          = pci_dev->devfn,
 -                      .is_virtfn      = 1,
 -                      .physfn.bus     = pci_dev->physfn->bus->number,
 -                      .physfn.devfn   = pci_dev->physfn->devfn,
 +                      .is_virtfn      = 1,
 +                      .physfn.bus     = physfn->bus->number,
 +                      .physfn.devfn   = physfn->devfn,
                };
  
                r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
                        &manage_pci_ext);
 -      } else
 +      }
  #endif
 -      if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
 +      else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
                struct physdev_manage_pci_ext manage_pci_ext = {
                        .bus            = pci_dev->bus->number,
                        .devfn          = pci_dev->devfn,
                        &manage_pci_ext);
        } else {
                struct physdev_manage_pci manage_pci = {
-                       .bus    = pci_dev->bus->number,
+                       .bus    = pci_dev->bus->number,
                        .devfn  = pci_dev->devfn,
                };
  
@@@ -131,27 -71,13 +131,27 @@@ static int xen_remove_device(struct dev
  {
        int r;
        struct pci_dev *pci_dev = to_pci_dev(dev);
 -      struct physdev_manage_pci manage_pci;
  
 -      manage_pci.bus = pci_dev->bus->number;
 -      manage_pci.devfn = pci_dev->devfn;
 +      if (pci_seg_supported) {
 +              struct physdev_pci_device device = {
 +                      .seg = pci_domain_nr(pci_dev->bus),
 +                      .bus = pci_dev->bus->number,
 +                      .devfn = pci_dev->devfn
 +              };
  
 -      r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
 -              &manage_pci);
 +              r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
 +                                        &device);
 +      } else if (pci_domain_nr(pci_dev->bus))
 +              r = -ENOSYS;
 +      else {
 +              struct physdev_manage_pci manage_pci = {
 +                      .bus = pci_dev->bus->number,
 +                      .devfn = pci_dev->devfn
 +              };
 +
 +              r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
 +                                        &manage_pci);
 +      }
  
        return r;
  }
@@@ -170,16 -96,13 +170,16 @@@ static int xen_pci_notifier(struct noti
                r = xen_remove_device(dev);
                break;
        default:
 -              break;
 +              return NOTIFY_DONE;
        }
 -
 -      return r;
 +      if (r)
 +              dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
 +                      action == BUS_NOTIFY_ADD_DEVICE ? "add" :
 +                      (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
 +      return NOTIFY_OK;
  }
  
 -struct notifier_block device_nb = {
 +static struct notifier_block device_nb = {
        .notifier_call = xen_pci_notifier,
  };
  
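xen_add_device() above follows a probe-and-fall-back pattern: it first tries the segment-aware PHYSDEVOP_pci_device_add hypercall, and if the hypervisor answers -ENOSYS it latches pci_seg_supported to false and uses the legacy calls, which only work for devices on PCI segment 0. A compact userspace sketch of that pattern; the two "ops" are hypothetical stand-ins for the hypercalls.

/*
 * Sketch of the probe-and-fall-back logic in xen_add_device() above.
 * new_op()/legacy_op() model the PHYSDEVOP_* hypercalls; here the
 * "hypervisor" is old and always answers -ENOSYS to the new op.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENOSYS 38

static bool seg_supported = true;

static int new_op(int seg, int bus, int devfn)
{
	(void)seg; (void)bus; (void)devfn;
	return -ENOSYS;			/* new hypercall not implemented */
}

static int legacy_op(int bus, int devfn)
{
	printf("legacy add: %02x:%02x.%x\n", bus, devfn >> 3, devfn & 7);
	return 0;
}

static int add_device(int seg, int bus, int devfn)
{
	if (seg_supported) {
		int r = new_op(seg, bus, devfn);

		if (r != -ENOSYS)
			return r;	/* new op exists: done */
		seg_supported = false;	/* remember for next time */
	}
	if (seg)
		return -ENOSYS;		/* legacy op handles segment 0 only */
	return legacy_op(bus, devfn);
}

int main(void)
{
	return add_device(0, 3, (5 << 3) | 0);
}
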
diff --combined drivers/xen/xenbus/xenbus_probe.c
index 0e867eeecb045569094c1d52d019ce8f306494b6,d4c7a9ffbcb93d880614bbbdfebdfcae9fe47dc1..1b178c6e893796c2807d8a5a1992c9cc27a7b97a
@@@ -46,7 -46,6 +46,7 @@@
  #include <linux/mutex.h>
  #include <linux/io.h>
  #include <linux/slab.h>
 +#include <linux/module.h>
  
  #include <asm/page.h>
  #include <asm/pgtable.h>
@@@ -310,8 -309,7 +310,7 @@@ void xenbus_unregister_driver(struct xe
  }
  EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
  
- struct xb_find_info
- {
+ struct xb_find_info {
        struct xenbus_device *dev;
        const char *nodename;
  };
@@@ -640,7 -638,7 +639,7 @@@ int xenbus_dev_cancel(struct device *de
  EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
  
  /* A flag to determine if xenstored is 'ready' (i.e. has started) */
- int xenstored_ready = 0;
+ int xenstored_ready;
  
  
  int register_xenstore_notifier(struct notifier_block *nb)
@@@ -685,74 -683,64 +684,74 @@@ static int __init xenbus_probe_initcall
  
  device_initcall(xenbus_probe_initcall);
  
 -static int __init xenbus_init(void)
 +/* Set up event channel for xenstored which is run as a local process
 + * (this is normally used only in dom0)
 + */
 +static int __init xenstored_local_init(void)
  {
        int err = 0;
        unsigned long page = 0;
 +      struct evtchn_alloc_unbound alloc_unbound;
  
 -      DPRINTK("");
 +      /* Allocate Xenstore page */
 +      page = get_zeroed_page(GFP_KERNEL);
 +      if (!page)
 +              goto out_err;
  
 -      err = -ENODEV;
 -      if (!xen_domain())
 -              return err;
 +      xen_store_mfn = xen_start_info->store_mfn =
 +              pfn_to_mfn(virt_to_phys((void *)page) >>
 +                         PAGE_SHIFT);
  
 -      /*
 -       * Domain0 doesn't have a store_evtchn or store_mfn yet.
 -       */
 -      if (xen_initial_domain()) {
 -              struct evtchn_alloc_unbound alloc_unbound;
 +      /* Next allocate a local port which xenstored can bind to */
 +      alloc_unbound.dom        = DOMID_SELF;
 +      alloc_unbound.remote_dom = DOMID_SELF;
  
 -              /* Allocate Xenstore page */
 -              page = get_zeroed_page(GFP_KERNEL);
 -              if (!page)
 -                      goto out_error;
 +      err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
 +                                        &alloc_unbound);
 +      if (err == -ENOSYS)
 +              goto out_err;
  
 -              xen_store_mfn = xen_start_info->store_mfn =
 -                      pfn_to_mfn(virt_to_phys((void *)page) >>
 -                                 PAGE_SHIFT);
 +      BUG_ON(err);
 +      xen_store_evtchn = xen_start_info->store_evtchn =
 +              alloc_unbound.port;
  
 -              /* Next allocate a local port which xenstored can bind to */
 -              alloc_unbound.dom        = DOMID_SELF;
 -              alloc_unbound.remote_dom = 0;
 +      return 0;
  
 -              err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
 -                                                &alloc_unbound);
 -              if (err == -ENOSYS)
 -                      goto out_error;
 + out_err:
 +      if (page != 0)
 +              free_page(page);
 +      return err;
 +}
  
 -              BUG_ON(err);
 -              xen_store_evtchn = xen_start_info->store_evtchn =
 -                      alloc_unbound.port;
 +static int __init xenbus_init(void)
 +{
 +      int err = 0;
  
 -              xen_store_interface = mfn_to_virt(xen_store_mfn);
 +      if (!xen_domain())
 +              return -ENODEV;
 +
 +      if (xen_hvm_domain()) {
 +              uint64_t v = 0;
 +              err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
 +              if (err)
 +                      goto out_error;
 +              xen_store_evtchn = (int)v;
 +              err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
 +              if (err)
 +                      goto out_error;
 +              xen_store_mfn = (unsigned long)v;
 +              xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
        } else {
 -              if (xen_hvm_domain()) {
 -                      uint64_t v = 0;
 -                      err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
 -                      if (err)
 -                              goto out_error;
 -                      xen_store_evtchn = (int)v;
 -                      err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
 +              xen_store_evtchn = xen_start_info->store_evtchn;
 +              xen_store_mfn = xen_start_info->store_mfn;
 +              if (xen_store_evtchn)
 +                      xenstored_ready = 1;
 +              else {
 +                      err = xenstored_local_init();
                        if (err)
                                goto out_error;
 -                      xen_store_mfn = (unsigned long)v;
 -                      xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
 -              } else {
 -                      xen_store_evtchn = xen_start_info->store_evtchn;
 -                      xen_store_mfn = xen_start_info->store_mfn;
 -                      xen_store_interface = mfn_to_virt(xen_store_mfn);
 -                      xenstored_ready = 1;
                }
 +              xen_store_interface = mfn_to_virt(xen_store_mfn);
        }
  
        /* Initialize the interface to xenstore. */
        proc_mkdir("xen", NULL);
  #endif
  
-  out_error:
 -      return 0;
 -
+ out_error:
 -      if (page != 0)
 -              free_page(page);
 -
        return err;
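
The xenbus_probe.c rewrite above splits xenbus_init() into three cases: HVM guests read the xenstore event channel and pfn from HVM parameters, PV guests take them from xen_start_info, and a PV domain with no pre-set event channel (dom0 running a local xenstored) calls the new xenstored_local_init(). A schematic userspace sketch of that three-way decision; all helpers and values are hypothetical stand-ins.

/*
 * Sketch of the restructured xenbus_init() decision above.  The
 * constants model hvm_get_parameter()/xen_start_info results.
 */
#include <stdio.h>

static int xenstored_local_init(void)	/* dom0: allocate page + unbound port */
{
	return 42;			/* hypothetical port number */
}

static int xenbus_init(int hvm, int store_evtchn)
{
	int evtchn;

	if (hvm) {
		evtchn = 10;		/* hvm_get_parameter(HVM_PARAM_STORE_EVTCHN) */
	} else if (store_evtchn) {
		evtchn = store_evtchn;	/* from xen_start_info: xenstored ready */
	} else {
		evtchn = xenstored_local_init();	/* no channel yet: local path */
		if (evtchn < 0)
			return evtchn;
	}
	printf("xenstore event channel: %d\n", evtchn);
	return 0;
}

int main(void)
{
	return xenbus_init(0 /* PV */, 0 /* no store_evtchn: dom0-style path */);
}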
  }