git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge branch 'guarded-storage' into 'features' to make merging with
the KVM tree easier.
author		Martin Schwidefsky <schwidefsky@de.ibm.com>
		Wed, 22 Mar 2017 07:22:02 +0000 (08:22 +0100)
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>
		Wed, 22 Mar 2017 07:22:02 +0000 (08:22 +0100)

arch/s390/kernel/smp.c

diff --combined arch/s390/kernel/smp.c
index 5dab859b0d543be205eaa5a176728e87f5e3bfc6,286bcee800f48a24b96e9512dd94cbc625cc9d72..363000a77ffc74c8afd101537d14535fe20d8933
@@@ -51,6 -51,7 +51,7 @@@
  #include <asm/os_info.h>
  #include <asm/sigp.h>
  #include <asm/idle.h>
+ #include <asm/nmi.h>
  #include "entry.h"
  
  enum {
@@@ -78,6 -79,8 +79,8 @@@ struct pcpu 
  static u8 boot_core_type;
  static struct pcpu pcpu_devices[NR_CPUS];
  
+ static struct kmem_cache *pcpu_mcesa_cache;
  unsigned int smp_cpu_mt_shift;
  EXPORT_SYMBOL(smp_cpu_mt_shift);
  
@@@ -188,8 -191,10 +191,10 @@@ static void pcpu_ec_call(struct pcpu *p
  static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
  {
        unsigned long async_stack, panic_stack;
+       unsigned long mcesa_origin, mcesa_bits;
        struct lowcore *lc;
  
+       mcesa_origin = mcesa_bits = 0;
        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
                async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                panic_stack = __get_free_page(GFP_KERNEL);
                if (!pcpu->lowcore || !panic_stack || !async_stack)
                        goto out;
+               if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+                       mcesa_origin = (unsigned long)
+                               kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
+                       if (!mcesa_origin)
+                               goto out;
+                       mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
+               }
        } else {
                async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
                panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
+               mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
+               mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
        lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+       lc->mcesad = mcesa_origin | mcesa_bits;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
-       if (MACHINE_HAS_VX)
-               lc->vector_save_area_addr =
-                       (unsigned long) &lc->vector_save_area;
        if (vdso_alloc_per_cpu(lc))
                goto out;
        lowcore_ptr[cpu] = lc;
        return 0;
  out:
        if (pcpu != &pcpu_devices[0]) {
+               if (mcesa_origin)
+                       kmem_cache_free(pcpu_mcesa_cache,
+                                       (void *) mcesa_origin);
                free_page(panic_stack);
                free_pages(async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
                pcpu->lowcore = NULL;
        }
        return -ENOMEM;
  }
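For readers following the hunk above: the new lowcore field mcesad packs the machine-check
extended save area origin together with a size code in its low bits, which is why
pcpu_alloc_lowcore() combines mcesa_origin and mcesa_bits into one value. Below is a minimal
stand-alone sketch of that packing; the DEMO_* mask values and the size code are assumptions
for illustration only, not the definitions from asm/nmi.h.

/*
 * Illustrative sketch only: how a field like lc->mcesad can carry both
 * the save-area origin and a size code in its low (alignment) bits.
 * The masks and size code below are assumptions, not taken from asm/nmi.h.
 */
#include <stdio.h>

#define DEMO_ORIGIN_MASK (~0x3ffUL) /* assumed: origin is at least 1K aligned */
#define DEMO_LC_MASK     (0xfUL)    /* assumed: low bits hold the size code */

static unsigned long demo_make_mcesad(unsigned long origin, unsigned long size_code)
{
	/* the origin is power-of-two aligned, so its low bits are free for the code */
	return (origin & DEMO_ORIGIN_MASK) | (size_code & DEMO_LC_MASK);
}

int main(void)
{
	/* 11 mirrors the "MACHINE_HAS_GS ? 11 : 0" choice in the hunk above */
	unsigned long mcesad = demo_make_mcesad(0x12345800UL, 11);

	printf("origin %#lx, size code %lu\n",
	       mcesad & DEMO_ORIGIN_MASK, mcesad & DEMO_LC_MASK);
	return 0;
}

pcpu_free_lowcore() below simply reverses this: it masks the origin back out and returns it
to the slab cache.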
  
  static void pcpu_free_lowcore(struct pcpu *pcpu)
  {
+       unsigned long mcesa_origin;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
        vdso_free_per_cpu(pcpu->lowcore);
        if (pcpu == &pcpu_devices[0])
                return;
+       if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+               mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
+               kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
+       }
        free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
        free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
        free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@@ -550,9 -571,11 +571,11 @@@ int smp_store_status(int cpu
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
-       if (!MACHINE_HAS_VX)
+       if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
                return 0;
-       pa = __pa(pcpu->lowcore->vector_save_area_addr);
+       pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
+       if (MACHINE_HAS_GS)
+               pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
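A note on the smp_store_status() change just above: the parameter for the SIGP "store
additional status at address" order is now derived from the same mcesad field; without
guarded storage it is just the physical origin of the save area, with guarded storage the
size code rides along in the low bits. The following user-space sketch shows that derivation;
demo_pa() and the DEMO_* masks are placeholders, not the kernel's __pa() or the real masks
from asm/nmi.h.

/*
 * Sketch of the address computation in smp_store_status().
 * demo_pa() and the masks are stand-ins for the kernel helpers.
 */
#include <stdio.h>
#include <stdbool.h>

#define DEMO_ORIGIN_MASK (~0x3ffUL)
#define DEMO_LC_MASK     (0xfUL)

static unsigned long demo_pa(unsigned long addr)
{
	return addr; /* identity mapping stands in for __pa() */
}

static unsigned long demo_additional_status_parm(unsigned long mcesad, bool has_gs)
{
	unsigned long pa = demo_pa(mcesad & DEMO_ORIGIN_MASK);

	if (has_gs)	/* with guarded storage the size code travels with the address */
		pa |= mcesad & DEMO_LC_MASK;
	return pa;
}

int main(void)
{
	printf("%#lx\n", demo_additional_status_parm(0x12345800UL | 11, true));
	return 0;
}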
@@@ -897,23 -920,35 +920,33 @@@ void __init smp_fill_possible_mask(void
  
  void __init smp_prepare_cpus(unsigned int max_cpus)
  {
+       unsigned long size;
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1202");
+       /* create slab cache for the machine-check-extended-save-areas */
+       if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+               size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
+               pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
+                                                    size, size, 0, NULL);
+               if (!pcpu_mcesa_cache)
+                       panic("Couldn't create nmi save area cache");
+       }
  }
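The cache created in smp_prepare_cpus() above uses the same power-of-two value for object
size and alignment (2K when guarded storage is available, 1K otherwise), which keeps the low
bits of every allocated origin clear so they can hold the size code. A rough user-space
analogue with aligned_alloc(), purely for illustration of that size == alignment choice:

/* User-space analogue of the size == alignment allocation above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	int has_gs = 1;                          /* pretend MACHINE_HAS_GS */
	size_t size = 1UL << (has_gs ? 11 : 10); /* 2K with GS, 1K without */
	void *mcesa = aligned_alloc(size, size);

	if (!mcesa)
		return 1;
	/* the low log2(size) bits of the origin are zero by construction */
	printf("origin %p, low bits %#lx\n",
	       mcesa, (unsigned long) ((uintptr_t) mcesa & (size - 1)));
	free(mcesa);
	return 0;
}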
  
  void __init smp_prepare_boot_cpu(void)
  {
        struct pcpu *pcpu = pcpu_devices;
  
 +      WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
 -      pcpu->address = stap();
        pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 -      set_cpu_present(0, true);
 -      set_cpu_online(0, true);
  }
  
  void __init smp_cpus_done(unsigned int max_cpus)
  
  void __init smp_setup_processor_id(void)
  {
 +      pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
  }