git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Dec 2009 21:38:21 +0000 (13:38 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 8 Dec 2009 21:38:21 +0000 (13:38 -0800)
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: UV RTC: Always enable RTC clocksource
  x86: UV RTC: Rename generic_interrupt to x86_platform_ipi
  x86: UV RTC: Clean up error handling
  x86: UV RTC: Add clocksource only boot option
  x86: UV RTC: Fix early expiry handling
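
The central rename in this series shows up throughout the hunks below: the generic_interrupt vector, its smp_generic_interrupt handler, and the generic_interrupt_extension hook become x86_platform_ipi, smp_x86_platform_ipi and x86_platform_ipi_callback. As the irq.c hunk shows, the handler only checks the callback pointer for non-NULL before invoking it, so a platform driver (the UV RTC code in this series) installs itself by plain assignment. A minimal sketch of such a consumer, using hypothetical example_* names that are not part of this patch:

#include <linux/errno.h>        /* -EBUSY */
#include <linux/init.h>         /* __init */
#include <asm/irq.h>            /* declares x86_platform_ipi_callback */

/* Runs from smp_x86_platform_ipi(), i.e. between irq_enter() and irq_exit(). */
static void example_platform_ipi_handler(void)
{
        /* handle the platform IPI, e.g. expire pending timers */
}

static int __init example_claim_platform_ipi(void)
{
        if (x86_platform_ipi_callback)
                return -EBUSY;  /* the single hook is already taken */

        x86_platform_ipi_callback = example_platform_ipi_handler;
        return 0;
}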

arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/irq.h
arch/x86/kernel/entry_64.S
arch/x86/kernel/irq.c

diff --combined arch/x86/include/asm/hardirq.h
index 108eb6fd1ae7f6da8c94ca72197fdf7f4e3ec74d,beaabd794a10c1b8f4af770ba9176a82cf03f7aa..0f8576427cfefff4ada5dfdaac118e01428d8997
@@@ -12,7 -12,7 +12,7 @@@ typedef struct 
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
  #endif
-       unsigned int generic_irqs;      /* arch dependent */
+       unsigned int x86_platform_ipis; /* arch dependent */
        unsigned int apic_perf_irqs;
        unsigned int apic_pending_irqs;
  #ifdef CONFIG_SMP
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
  #endif
 -#ifdef CONFIG_X86_MCE
 +#ifdef CONFIG_X86_THERMAL_VECTOR
        unsigned int irq_thermal_count;
 -# ifdef CONFIG_X86_MCE_THRESHOLD
 +#endif
 +#ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count;
 -# endif
  #endif
  } ____cacheline_aligned irq_cpustat_t;
  
diff --combined arch/x86/include/asm/hw_irq.h
index 6e124269fd4bea3d52e28a1c977f22ec79fce07e,95207ca5c6f1780c2e717d893812695a1d37cd04..08c48a81841fd5af23bc24f354dbb82efd8681ef
@@@ -27,7 -27,7 +27,7 @@@
  
  /* Interrupt handlers registered during init_IRQ */
  extern void apic_timer_interrupt(void);
- extern void generic_interrupt(void);
+ extern void x86_platform_ipi(void);
  extern void error_interrupt(void);
  extern void perf_pending_interrupt(void);
  
@@@ -79,32 -79,14 +79,32 @@@ static inline void set_io_apic_irq_attr
                                        int ioapic, int ioapic_pin,
                                        int trigger, int polarity)
  {
 -      irq_attr->ioapic     = ioapic;
 -      irq_attr->ioapic_pin = ioapic_pin;
 -      irq_attr->trigger    = trigger;
 -      irq_attr->polarity   = polarity;
 +      irq_attr->ioapic        = ioapic;
 +      irq_attr->ioapic_pin    = ioapic_pin;
 +      irq_attr->trigger       = trigger;
 +      irq_attr->polarity      = polarity;
  }
  
 -extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
 -                                      struct io_apic_irq_attr *irq_attr);
 +/*
 + * This is performance-critical, we want to do it O(1)
 + *
 + * Most irqs are mapped 1:1 with pins.
 + */
 +struct irq_cfg {
 +      struct irq_pin_list     *irq_2_pin;
 +      cpumask_var_t           domain;
 +      cpumask_var_t           old_domain;
 +      u8                      vector;
 +      u8                      move_in_progress : 1;
 +};
 +
 +extern struct irq_cfg *irq_cfg(unsigned int);
 +extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 +extern void send_cleanup_vector(struct irq_cfg *);
 +
 +struct irq_desc;
 +extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
 +extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
  extern void setup_ioapic_dest(void);
  
  extern void enable_IO_APIC(void);
@@@ -119,7 -101,7 +119,7 @@@ extern void eisa_set_level_irq(unsigne
  /* SMP */
  extern void smp_apic_timer_interrupt(struct pt_regs *);
  extern void smp_spurious_interrupt(struct pt_regs *);
- extern void smp_generic_interrupt(struct pt_regs *);
+ extern void smp_x86_platform_ipi(struct pt_regs *);
  extern void smp_error_interrupt(struct pt_regs *);
  #ifdef CONFIG_X86_IO_APIC
  extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
diff --combined arch/x86/include/asm/irq.h
index ffd700ff5dcb8b483d9c4e27762f3c986a6a6643,fcbc6d1445011095d827ad44238813f85f54bd29..5458380b6ef8fb3cefc93077acdb8441b863ba6f
@@@ -34,10 -34,9 +34,10 @@@ static inline int irq_canonicalize(int 
  #ifdef CONFIG_HOTPLUG_CPU
  #include <linux/cpumask.h>
  extern void fixup_irqs(void);
 +extern void irq_force_complete_move(int);
  #endif
  
- extern void (*generic_interrupt_extension)(void);
+ extern void (*x86_platform_ipi_callback)(void);
  extern void native_init_IRQ(void);
  extern bool handle_irq(unsigned irq, struct pt_regs *regs);
  
diff --combined arch/x86/kernel/entry_64.S
index 4deb8fc849dd1554a2ed6b3dc9c1f28ed5c6a797,6714432ef381f78b99fa3deae815140fdab2cfd3..63bca794c8f99c0f95aeecf858cacf288ad2d44a
@@@ -155,11 -155,11 +155,11 @@@ GLOBAL(return_to_handler
  
        call ftrace_return_to_handler
  
 -      movq %rax, 16(%rsp)
 +      movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
 -      addq $16, %rsp
 -      retq
 +      addq $24, %rsp
 +      jmp *%rdi
  #endif
  
  
@@@ -803,10 -803,6 +803,10 @@@ END(interrupt
        call \func
        .endm
  
 +/*
 + * Interrupt entry/exit should be protected against kprobes
 + */
 +      .pushsection .kprobes.text, "ax"
        /*
         * The interrupt stubs push (~vector+0x80) onto the stack and
         * then jump to common_interrupt.
@@@ -945,10 -941,6 +945,10 @@@ ENTRY(retint_kernel
  
        CFI_ENDPROC
  END(common_interrupt)
 +/*
 + * End of kprobes section
 + */
 +       .popsection
  
  /*
   * APIC interrupts.
@@@ -977,8 -969,8 +977,8 @@@ apicinterrupt UV_BAU_MESSAGE 
  #endif
  apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
- apicinterrupt GENERIC_INTERRUPT_VECTOR \
-       generic_interrupt smp_generic_interrupt
+ apicinterrupt X86_PLATFORM_IPI_VECTOR \
+       x86_platform_ipi smp_x86_platform_ipi
  
  #ifdef CONFIG_SMP
  apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
@@@ -1499,17 -1491,12 +1499,17 @@@ error_kernelspace
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
 -      movl %ecx,%ecx  /* zero extend */
 -      cmpq %rcx,RIP+8(%rsp)
 -      je error_swapgs
 +      movl %ecx,%eax  /* zero extend */
 +      cmpq %rax,RIP+8(%rsp)
 +      je bstep_iret
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti
 +
 +bstep_iret:
 +      /* Fix truncated RIP */
 +      movq %rcx,RIP+8(%rsp)
 +      jmp error_swapgs
  END(error_entry)
  
  
diff --combined arch/x86/kernel/irq.c
index fee6cc2b20791ae1d7a1ea5c5e659ac04b8d2569,9375dce39f5f9f2fde392498ff0b2db0ef55433a..664bcb7384ac29245cdbf60751a6e09f8d1e8e82
@@@ -18,7 -18,7 +18,7 @@@
  atomic_t irq_err_count;
  
  /* Function pointer for generic interrupt vector handling */
- void (*generic_interrupt_extension)(void) = NULL;
+ void (*x86_platform_ipi_callback)(void) = NULL;
  
  /*
   * 'what should we do if we get a hw irq event on an illegal vector'.
@@@ -63,19 -63,19 +63,19 @@@ static int show_other_interrupts(struc
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
 -      seq_printf(p, "%*s: ", prec, "CNT");
 +      seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
 -      seq_printf(p, "  Performance counter interrupts\n");
 +      seq_printf(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "PND");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
        seq_printf(p, "  Performance pending work\n");
  #endif
-       if (generic_interrupt_extension) {
+       if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
+                       seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_printf(p, "  Platform interrupts\n");
        }
  #ifdef CONFIG_SMP
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
  #endif
 -#ifdef CONFIG_X86_MCE
 +#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
 -# ifdef CONFIG_X86_MCE_THRESHOLD
 +#endif
 +#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
 -# endif
  #endif
  #ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
@@@ -187,18 -187,18 +187,18 @@@ u64 arch_irq_stat_cpu(unsigned int cpu
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_pending_irqs;
  #endif
-       if (generic_interrupt_extension)
-               sum += irq_stats(cpu)->generic_irqs;
+       if (x86_platform_ipi_callback)
+               sum += irq_stats(cpu)->x86_platform_ipis;
  #ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
        sum += irq_stats(cpu)->irq_tlb_count;
  #endif
 -#ifdef CONFIG_X86_MCE
 +#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
 -# ifdef CONFIG_X86_MCE_THRESHOLD
 +#endif
 +#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
 -# endif
  #endif
  #ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
@@@ -244,6 -244,7 +244,6 @@@ unsigned int __irq_entry do_IRQ(struct 
                                __func__, smp_processor_id(), vector, irq);
        }
  
 -      run_local_timers();
        irq_exit();
  
        set_irq_regs(old_regs);
  }
  
  /*
-  * Handler for GENERIC_INTERRUPT_VECTOR.
+  * Handler for X86_PLATFORM_IPI_VECTOR.
   */
- void smp_generic_interrupt(struct pt_regs *regs)
+ void smp_x86_platform_ipi(struct pt_regs *regs)
  {
        struct pt_regs *old_regs = set_irq_regs(regs);
  
  
        irq_enter();
  
-       inc_irq_stat(generic_irqs);
+       inc_irq_stat(x86_platform_ipis);
  
-       if (generic_interrupt_extension)
-               generic_interrupt_extension();
+       if (x86_platform_ipi_callback)
+               x86_platform_ipi_callback();
  
 -      run_local_timers();
        irq_exit();
  
        set_irq_regs(old_regs);
  }
  
  EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 +
 +#ifdef CONFIG_HOTPLUG_CPU
 +/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 +void fixup_irqs(void)
 +{
 +      unsigned int irq, vector;
 +      static int warned;
 +      struct irq_desc *desc;
 +
 +      for_each_irq_desc(irq, desc) {
 +              int break_affinity = 0;
 +              int set_affinity = 1;
 +              const struct cpumask *affinity;
 +
 +              if (!desc)
 +                      continue;
 +              if (irq == 2)
 +                      continue;
 +
 +              /* interrupt's are disabled at this point */
 +              spin_lock(&desc->lock);
 +
 +              affinity = desc->affinity;
 +              if (!irq_has_action(irq) ||
 +                  cpumask_equal(affinity, cpu_online_mask)) {
 +                      spin_unlock(&desc->lock);
 +                      continue;
 +              }
 +
 +              /*
 +               * Complete the irq move. This cpu is going down and for
 +               * non intr-remapping case, we can't wait till this interrupt
 +               * arrives at this cpu before completing the irq move.
 +               */
 +              irq_force_complete_move(irq);
 +
 +              if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 +                      break_affinity = 1;
 +                      affinity = cpu_all_mask;
 +              }
 +
 +              if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
 +                      desc->chip->mask(irq);
 +
 +              if (desc->chip->set_affinity)
 +                      desc->chip->set_affinity(irq, affinity);
 +              else if (!(warned++))
 +                      set_affinity = 0;
 +
 +              if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
 +                      desc->chip->unmask(irq);
 +
 +              spin_unlock(&desc->lock);
 +
 +              if (break_affinity && set_affinity)
 +                      printk("Broke affinity for irq %i\n", irq);
 +              else if (!set_affinity)
 +                      printk("Cannot set affinity for irq %i\n", irq);
 +      }
 +
 +      /*
 +       * We can remove mdelay() and then send spuriuous interrupts to
 +       * new cpu targets for all the irqs that were handled previously by
 +       * this cpu. While it works, I have seen spurious interrupt messages
 +       * (nothing wrong but still...).
 +       *
 +       * So for now, retain mdelay(1) and check the IRR and then send those
 +       * interrupts to new targets as this cpu is already offlined...
 +       */
 +      mdelay(1);
 +
 +      for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 +              unsigned int irr;
 +
 +              if (__get_cpu_var(vector_irq)[vector] < 0)
 +                      continue;
 +
 +              irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 +              if (irr  & (1 << (vector % 32))) {
 +                      irq = __get_cpu_var(vector_irq)[vector];
 +
 +                      desc = irq_to_desc(irq);
 +                      spin_lock(&desc->lock);
 +                      if (desc->chip->retrigger)
 +                              desc->chip->retrigger(irq);
 +                      spin_unlock(&desc->lock);
 +              }
 +      }
 +}
 +#endif
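
For reference, the IRR scan at the end of fixup_irqs() above relies on the local APIC exposing its 256-bit IRR as eight 32-bit registers spaced 0x10 apart, so the pending bit for a vector lives in register APIC_IRR + (vector / 32) * 0x10 at bit position vector % 32. The same index arithmetic, restated as a stand-alone helper purely for illustration (the example_* name is hypothetical, not part of the patch):

#include <linux/types.h>        /* bool */
#include <asm/apic.h>           /* apic_read(), APIC_IRR */

/* Is 'vector' marked pending in the local APIC's interrupt request register? */
static inline bool example_vector_pending_in_irr(unsigned int vector)
{
        unsigned int irr = apic_read(APIC_IRR + (vector / 32) * 0x10);

        return irr & (1U << (vector % 32));
}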