Revert "powerpc: Replace __get_cpu_var uses"
author    Tejun Heo <tj@kernel.org>    Wed, 27 Aug 2014 15:18:29 +0000 (11:18 -0400)
committer Tejun Heo <tj@kernel.org>    Wed, 27 Aug 2014 15:18:29 +0000 (11:18 -0400)
This reverts commit 5828f666c069af74e00db21559f1535103c9f79a due to
build failure after merging with pending powerpc changes.

Link: http://lkml.kernel.org/g/20140827142243.6277eaff@canb.auug.org.au
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
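
For reference, the reverted commit converted powerpc from the old __get_cpu_var()
accessor to the this_cpu API; this revert restores the old form throughout, as the
diff below shows file by file. A minimal sketch contrasting the two idioms (the
per-CPU variable and functions here are hypothetical, for illustration only):

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only to contrast the two accessor
 * styles; the __-prefixed operations assume preemption is already disabled. */
static DEFINE_PER_CPU(unsigned int, demo_count);

static void demo_old_style(void)
{
	/* Legacy idiom: __get_cpu_var() expands to an lvalue for this CPU's
	 * copy, so it can be read, written, or incremented directly, and
	 * &__get_cpu_var() yields a pointer to it. */
	__get_cpu_var(demo_count)++;
}

static void demo_new_style(void)
{
	unsigned int *p;

	/* this_cpu idiom: dedicated read/write/inc operations, which an
	 * architecture can implement with a single relocated instruction
	 * instead of first computing the per-CPU address. */
	__this_cpu_inc(demo_count);

	/* this_cpu_ptr() replaces &__get_cpu_var() where a pointer is needed. */
	p = this_cpu_ptr(&demo_count);
	(void)p;
}

(__get_cpu_var() was later removed entirely once the conversions stuck, so the
two styles coexist only on kernels of this era.)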
32 files changed:
arch/powerpc/include/asm/hardirq.h
arch/powerpc/include/asm/tlbflush.h
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/hw_breakpoint.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/e500mc.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-book3e.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/powernv/opal-tracepoints.c
arch/powerpc/platforms/ps3/interrupt.c
arch/powerpc/platforms/pseries/dtl.c
arch/powerpc/platforms/pseries/hvCall_inst.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/sysdev/xics/xics-common.c

diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 8d907ba4fd05684a404b44143e728615c4321150..1bbb3013d6aa41ad47acb3a2d54564feffbe438b 100644
@@ -21,9 +21,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()        __this_cpu_read(irq_stat.__softirq_pending)
-#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
+#define local_softirq_pending()        __get_cpu_var(irq_stat).__softirq_pending
 
 static inline void ack_bad_irq(unsigned int irq)
 {
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index cd7c2719d3ef0046a6a34a75c8abf01cc74569c1..2def01ed0cb296ad48296366b4eb8a8dfb9e0da0 100644
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
        batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
        if (batch->index)
                __flush_tlb_pending(batch);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 5007ad0448ce564428e81f4a9f518367e52c8ffe..282d43a0c85566927755dc71d6a2ce9b5bd40d4a 100644
@@ -97,7 +97,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
        if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
                return;
@@ -110,7 +110,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
        if (WARN_ON(os_cppr->index < 1))
                return LOWEST_PRIORITY;
@@ -120,7 +120,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
        /* we only really want to set the priority when there's
         * just one cppr value on the stack
@@ -132,7 +132,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
        
        return os_cppr->stack[os_cppr->index];
 }
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31fd417243d17f079c8bf2471d37a5..d55c76c571f38dce85137b7e28e8a6d2f59aaf38 100644
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
 
        may_hard_irq_enable();
 
-       __this_cpu_inc(irq_stat.doorbell_irqs);
+       __get_cpu_var(irq_stat).doorbell_irqs++;
 
        smp_ipi_demux();
 
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index b62f90eaf19e74ea2cc8a35d994d4f5815560518..0bb5918faaaf2c008f9e3ae25a7ae3d1c7ad4f21 100644
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-       struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+       struct perf_event **slot = &__get_cpu_var(bp_per_reg);
 
        *slot = bp;
 
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-       struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+       struct perf_event **slot = &__get_cpu_var(bp_per_reg);
 
        if (*slot != bp) {
                WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
         */
        rcu_read_lock();
 
-       bp = __this_cpu_read(bp_per_reg);
+       bp = __get_cpu_var(bp_per_reg);
        if (!bp)
                goto out;
        info = counter_arch_bp(bp);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 71e60bfb89e27a3bd8cc72fc5304f061a8dd1424..a10642a0d861cd6a5a80cbe796b36e97d37f9df9 100644
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
-       pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
+       pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
 
        if (largealloc)
                pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 74d40c6855b8aa636c8d26bdf9d450a4a6634295..4c5891de162e2de7bff803f6139853c6ef066221 100644
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 static inline notrace int decrementer_check_overflow(void)
 {
        u64 now = get_tb_or_rtc();
-       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
  
        return now >= *next_tb;
 }
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
 
        /* And finally process it */
        if (unlikely(irq == NO_IRQ))
-               __this_cpu_inc(irq_stat.spurious_irqs);
+               __get_cpu_var(irq_stat).spurious_irqs++;
        else
                generic_handle_irq(irq);
 
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index e77c3ccf8dcfe432c6f7e7b761cb64a67afa3bed..8504657379f13fe7858f08b9f4f61ea4e23ecb92 100644
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 {
        struct thread_info *thread_info, *exception_thread_info;
        struct thread_info *backup_current_thread_info =
-               this_cpu_ptr(&kgdb_thread_info);
+               &__get_cpu_var(kgdb_thread_info);
 
        if (user_mode(regs))
                return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f28140663a4bb092c872a6c88e557fdff9d..2f72af82513c71d2d347c0373eaa0714e482ae5d 100644
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
 {
-       __this_cpu_write(current_kprobe, p);
+       __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_saved_msr = regs->msr;
 }
 
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                                ret = 1;
                                goto no_kprobe;
                        }
-                       p = __this_cpu_read(current_kprobe);
+                       p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 15c99b649b04cf2e946e2a612a3660dc50e6712c..a7fd4cb78b788e01a6545964572a4af44b28bc28 100644
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
                    uint64_t nip, uint64_t addr)
 {
        uint64_t srr1;
-       int index = __this_cpu_inc_return(mce_nest_count);
-       struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
+       int index = __get_cpu_var(mce_nest_count)++;
+       struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
 
        /*
         * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-       int index = __this_cpu_read(mce_nest_count) - 1;
+       int index = __get_cpu_var(mce_nest_count) - 1;
        struct machine_check_event *mc_evt;
        int ret = 0;
 
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 
        /* Check if we have MCE info to process. */
        if (index < MAX_MC_EVT) {
-               mc_evt = this_cpu_ptr(&mce_event[index]);
+               mc_evt = &__get_cpu_var(mce_event[index]);
                /* Copy the event structure and release the original */
                if (mce)
                        *mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
        }
        /* Decrement the count to free the slot. */
        if (release)
-               __this_cpu_dec(mce_nest_count);
+               __get_cpu_var(mce_nest_count)--;
 
        return ret;
 }
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
 
-       index = __this_cpu_inc_return(mce_queue_count);
+       index = __get_cpu_var(mce_queue_count)++;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
-               __this_cpu_dec(mce_queue_count);
+               __get_cpu_var(mce_queue_count)--;
                return;
        }
-       memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
+       __get_cpu_var(mce_event_queue[index]) = evt;
 
        /* Queue irq work to process this event later. */
        irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
         * For now just print it to console.
         * TODO: log this error event to FSP or nvram.
         */
-       while (__this_cpu_read(mce_queue_count) > 0) {
-               index = __this_cpu_read(mce_queue_count) - 1;
+       while (__get_cpu_var(mce_queue_count) > 0) {
+               index = __get_cpu_var(mce_queue_count) - 1;
                machine_check_print_event_info(
-                               this_cpu_ptr(&mce_event_queue[index]));
-               __this_cpu_dec(mce_queue_count);
+                               &__get_cpu_var(mce_event_queue[index]));
+               __get_cpu_var(mce_queue_count)--;
        }
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2df2f2956520e0d40868922f786729d1128b7d66..bf44ae962ab82206bb148c674dd2dbb7f093d1fd 100644
@@ -498,7 +498,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
 
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
-       __this_cpu_write(current_brk, *brk);
+       __get_cpu_var(current_brk) = *brk;
 
        if (cpu_has_feature(CPU_FTR_DAWR))
                set_dawr(brk);
@@ -841,7 +841,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * schedule DABR
  */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
+       if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
                __set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
@@ -855,7 +855,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-               struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
+               struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -865,7 +865,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
+       batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
@@ -888,7 +888,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-               batch = this_cpu_ptr(&ppc64_tlb_batch);
+               batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
 #endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 60391a51467a7a211c3181abc67ce94b98a9eb52..a0738af4aba6b80b3d356935d3bcf5c3b61a4f48 100644
@@ -242,7 +242,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 
 irqreturn_t smp_ipi_demux(void)
 {
-       struct cpu_messages *info = this_cpu_ptr(&ipi_message);
+       struct cpu_messages *info = &__get_cpu_var(ipi_message);
        unsigned int all;
 
        mb();   /* order any irq clear */
@@ -438,9 +438,9 @@ void generic_mach_cpu_die(void)
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
-       __this_cpu_write(cpu_state, CPU_DEAD);
+       __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
-       while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
+       while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
 }
 
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index fa1fd8a0c867f25611dfb6726e64cfd16ab39447..67fd2fd2620ae64a2f44cced5a25b68d49877c50 100644
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
        ppc_set_pmu_inuse(1);
 
        /* Only need to enable them once */
-       if (__this_cpu_read(pmcs_enabled))
+       if (__get_cpu_var(pmcs_enabled))
                return;
 
-       __this_cpu_write(pmcs_enabled, 1);
+       __get_cpu_var(pmcs_enabled) = 1;
 
        if (ppc_md.enable_pmcs)
                ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4769e5b7f905c764b81c43dabdfb2414d9cb970a..368ab374d33c6b315c4a553a613d10f7d453970b 100644
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()    __this_cpu_write(irq_work_pending, 1)
-#define test_irq_work_pending()                __this_cpu_read(irq_work_pending)
-#define clear_irq_work_pending()       __this_cpu_write(irq_work_pending, 0)
+#define set_irq_work_pending_flag()    __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()                __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()       __get_cpu_var(irq_work_pending) = 0
 
 #endif /* 32 vs 64 bit */
 
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
 void __timer_interrupt(void)
 {
        struct pt_regs *regs = get_irq_regs();
-       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
-       struct clock_event_device *evt = this_cpu_ptr(&decrementers);
+       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+       struct clock_event_device *evt = &__get_cpu_var(decrementers);
        u64 now;
 
        trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ void __timer_interrupt(void)
                *next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
-               __this_cpu_inc(irq_stat.timer_irqs_event);
+               __get_cpu_var(irq_stat).timer_irqs_event++;
        } else {
                now = *next_tb - now;
                if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ void __timer_interrupt(void)
                /* We may have raced with new irq work */
                if (test_irq_work_pending())
                        set_dec(1);
-               __this_cpu_inc(irq_stat.timer_irqs_others);
+               __get_cpu_var(irq_stat).timer_irqs_others++;
        }
 
 #ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-               struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
+               struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
 #endif
@@ -527,7 +527,7 @@ void __timer_interrupt(void)
 void timer_interrupt(struct pt_regs * regs)
 {
        struct pt_regs *old_regs;
-       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 
        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
 {
-       __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
+       __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
        set_dec(evt);
 
        /* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
-       u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+       u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 
        *next_tb = get_tb_or_rtc();
        __timer_interrupt();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e6595b72269b5c09eb7582c57a3a672f25f8bdfc..0dc43f9932cffb1bb8be62fa5537ae55474ecea2 100644
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
 {
        long handled = 0;
 
-       __this_cpu_inc(irq_stat.mce_exceptions);
+       __get_cpu_var(irq_stat).mce_exceptions++;
 
        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
 
 long hmi_exception_realmode(struct pt_regs *regs)
 {
-       __this_cpu_inc(irq_stat.hmi_exceptions);
+       __get_cpu_var(irq_stat).hmi_exceptions++;
 
        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
        enum ctx_state prev_state = exception_enter();
        int recover = 0;
 
-       __this_cpu_inc(irq_stat.mce_exceptions);
+       __get_cpu_var(irq_stat).mce_exceptions++;
 
        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
-       __this_cpu_inc(irq_stat.pmu_irqs);
+       __get_cpu_var(irq_stat).pmu_irqs++;
 
        perf_irq(regs);
 }
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 16095841afe14e6b8b5c13338267f6541f0be2bd..2e02ed849f36d1a5724a48557e08d8e3bea69ce0 100644
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
        unsigned long sid;
        int ret = -1;
 
-       sid = __this_cpu_inc_return(pcpu_last_used_sid);
+       sid = ++(__get_cpu_var(pcpu_last_used_sid));
        if (sid < NUM_TIDS) {
-               __this_cpu_write(pcpu_sids.entry[sid], entry);
+               __get_cpu_var(pcpu_sids).entry[sid] = entry;
                entry->val = sid;
-               entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
+               entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
                ret = sid;
        }
 
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
 static inline int local_sid_lookup(struct id *entry)
 {
        if (entry && entry->val != 0 &&
-           __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
-           entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
+           __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+           entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
                return entry->val;
        return -1;
 }
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
 /* Invalidate all id mappings on local core -- call with preempt disabled */
 static inline void local_sid_destroy_all(void)
 {
-       __this_cpu_write(pcpu_last_used_sid, 0);
-       memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
+       __get_cpu_var(pcpu_last_used_sid) = 0;
+       memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
 }
 
 static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 6ef54e523f33aaded5270d8dea98222ac83c49ce..164bad2a19bf6c715f771d76ac1f8289a2daa3e5 100644
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
        mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
        if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-           __this_cpu_read(last_vcpu_of_lpid[vcpu->kvm->arch.lpid]) != vcpu) {
+           __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
                kvmppc_e500_tlbil_all(vcpu_e500);
-               __this_cpu_write(last_vcpu_of_lpid[vcpu->kvm->arch.lpid], vcpu);
+               __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
        }
 
        kvmppc_load_guest_fp(vcpu);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 504a16f1a1a0e29983551b756153a03a9b6c436c..afc0a8295f84c7097217855fae59f62b1ed6149e 100644
@@ -625,7 +625,7 @@ static void native_flush_hash_range(unsigned long number, int local)
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
-       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 060d51fda35eef096662814be7fb05f9c7e94afe..daee7f4e5a14ca0048a7dfee9f2f07921529fced 100644
@@ -1314,7 +1314,7 @@ void flush_hash_range(unsigned long number, int local)
        else {
                int i;
                struct ppc64_tlb_batch *batch =
-                       this_cpu_ptr(&ppc64_tlb_batch);
+                       &__get_cpu_var(ppc64_tlb_batch);
 
                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vpn[i], batch->pte[i],
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index ba47aaf33a4bf19c19859fc0de1ed1ad05703c53..5e4ee2573903eb1af940ac1927c6be61c78fc8a9 100644
@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
 
        ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
 
-       index = this_cpu_read(next_tlbcam_idx);
+       index = __get_cpu_var(next_tlbcam_idx);
 
        /* Just round-robin the entries and wrap when we hit the end */
        if (unlikely(index == ncams - 1))
-               __this_cpu_write(next_tlbcam_idx, tlbcam_index);
+               __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
        else
-               __this_cpu_inc(next_tlbcam_idx);
+               __get_cpu_var(next_tlbcam_idx)++;
 
        return index;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8aa04f03fd31dc07b75fce0ba572261108c86aa1..7e70ae968e5f9df04cdf372e06ebea433be34d5b 100644
@@ -462,7 +462,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
        struct hugepd_freelist **batchp;
 
-       batchp = this_cpu_ptr(&hugepd_freelist_cur);
+       batchp = &get_cpu_var(hugepd_freelist_cur);
 
        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 690f9c7bf3c89aceb4f0616e25af17eb75af473f..b7cd00b0171ed9776603969e2af25d5abe3f66a9 100644
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
 
 static void power_pmu_bhrb_enable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        if (!ppmu->bhrb_nr)
                return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        if (!ppmu->bhrb_nr)
                return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
        if (!ppmu)
                return;
        local_irq_save(flags);
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
 
        if (!cpuhw->disabled) {
                /*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
                return;
        local_irq_save(flags);
 
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;
 
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
         * Add the event to the list (if there is room)
         * and check whether the total set is still feasible.
         */
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
        n0 = cpuhw->n_events;
        if (n0 >= ppmu->n_counter)
                goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
 
        power_pmu_read(event);
 
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
        for (i = 0; i < cpuhw->n_events; ++i) {
                if (event == cpuhw->event[i]) {
                        while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
  */
 void power_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ void power_pmu_start_txn(struct pmu *pmu)
  */
 void power_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
 
        if (!ppmu)
                return -EAGAIN;
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
        n = cpuhw->n_events;
        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
                return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
                if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
                        struct cpu_hw_events *cpuhw;
-                       cpuhw = this_cpu_ptr(&cpu_hw_events);
+                       cpuhw = &__get_cpu_var(cpu_hw_events);
                        power_pmu_bhrb_read(cpuhw);
                        data.br_stack = &cpuhw->bhrb_stack;
                }
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
 static void perf_event_interrupt(struct pt_regs *regs)
 {
        int i, j;
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        unsigned long val[8];
        int found, active;
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 4acaea01fe0313f99a69efa1331a2fbf158f001d..d35ae52c69dca3a20fb96e5dcb9fccc05bd5a54d 100644
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
        unsigned long flags;
 
        local_irq_save(flags);
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
 
        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
        unsigned long flags;
 
        local_irq_save(flags);
-       cpuhw = this_cpu_ptr(&cpu_hw_events);
+       cpuhw = &__get_cpu_var(cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;
 
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 static void perf_event_interrupt(struct pt_regs *regs)
 {
        int i;
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int found = 0;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be8ce78a833322cccca2e031764d11..8a106b4172e0e740dd8cedd7f54b6dc61a5bc270 100644
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
 
 static void iic_eoi(struct irq_data *d)
 {
-       struct iic *iic = this_cpu_ptr(&cpu_iic);
+       struct iic *iic = &__get_cpu_var(cpu_iic);
        out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
        BUG_ON(iic->eoi_ptr < 0);
 }
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
        struct iic *iic;
        unsigned int virq;
 
-       iic = this_cpu_ptr(&cpu_iic);
+       iic = &__get_cpu_var(cpu_iic);
        *(unsigned long *) &pending =
                in_be64((u64 __iomem *) &iic->regs->pending_destr);
        if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-       out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
+       out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index 9527e2a7c5412cdb0584e5b5dadb748c6db8e8f2..d8a000a9988b035db116aa446792a3968426e13d 100644
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
 
        local_irq_save(flags);
 
-       depth = this_cpu_ptr(&opal_trace_depth);
+       depth = &__get_cpu_var(opal_trace_depth);
 
        if (*depth)
                goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
 
        local_irq_save(flags);
 
-       depth = this_cpu_ptr(&opal_trace_depth);
+       depth = &__get_cpu_var(opal_trace_depth);
 
        if (*depth)
                goto out;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index a6c42f34303aaa3ae0d47542493006fcd952bded..5f3b23220b8ee395e0d9e05821a43b98fd8592f1 100644
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
 
 static unsigned int ps3_get_irq(void)
 {
-       struct ps3_private *pd = this_cpu_ptr(&ps3_private);
+       struct ps3_private *pd = &__get_cpu_var(ps3_private);
        u64 x = (pd->bmp.status & pd->bmp.mask);
        unsigned int plug;
 
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 39049e4884fbd30e8cf261f867da8309a98e9532..1062f71f5a85c4740c20027299c2acc664389fbe 100644
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-       struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
+       struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;
 
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f02ec3ab428c84d841a5a641b3803b79083491d3..4575f0c9e521203898e2a5158a6697fd271d1aef 100644
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
        if (opcode > MAX_HCALL_OPCODE)
                return;
 
-       h = this_cpu_ptr(&hcall_stats[opcode / 4]);
+       h = &__get_cpu_var(hcall_stats)[opcode / 4];
        h->tb_start = mftb();
        h->purr_start = mfspr(SPRN_PURR);
 }
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
        if (opcode > MAX_HCALL_OPCODE)
                return;
 
-       h = this_cpu_ptr(&hcall_stats[opcode / 4]);
+       h = &__get_cpu_var(hcall_stats)[opcode / 4];
        h->num_calls++;
        h->tb_total += mftb() - h->tb_start;
        h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8c355ed4291e091d041f1918ab38350728c1da5e..4642d6a4d35641d5219a2378ce917409cea4c850 100644
@@ -200,7 +200,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 
        local_irq_save(flags);  /* to protect tcep and the page behind it */
 
-       tcep = __this_cpu_read(tce_page);
+       tcep = __get_cpu_var(tce_page);
 
        /* This is safe to do since interrupts are off when we're called
         * from iommu_alloc{,_sg}()
@@ -213,7 +213,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                            direction, attrs);
                }
-               __this_cpu_write(tce_page, tcep);
+               __get_cpu_var(tce_page) = tcep;
        }
 
        rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -399,7 +399,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
        long l, limit;
 
        local_irq_disable();    /* to protect tcep and the page behind it */
-       tcep = __this_cpu_read(tce_page);
+       tcep = __get_cpu_var(tce_page);
 
        if (!tcep) {
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -407,7 +407,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
                        local_irq_enable();
                        return -ENOMEM;
                }
-               __this_cpu_write(tce_page, tcep);
+               __get_cpu_var(tce_page) = tcep;
        }
 
        proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 56df72da59fe6d67822196ad126ac61c05caa856..34e64237fff9a9ca4cf6d62751e99feff41a3f1e 100644
@@ -507,7 +507,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
        unsigned long vpn;
        unsigned long i, pix, rc;
        unsigned long flags = 0;
-       struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long param[9];
        unsigned long hash, index, shift, hidx, slot;
@@ -697,7 +697,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 
        local_irq_save(flags);
 
-       depth = this_cpu_ptr(&hcall_trace_depth);
+       depth = &__get_cpu_var(hcall_trace_depth);
 
        if (*depth)
                goto out;
@@ -722,7 +722,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 
        local_irq_save(flags);
 
-       depth = this_cpu_ptr(&hcall_trace_depth);
+       depth = &__get_cpu_var(hcall_trace_depth);
 
        if (*depth)
                goto out;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 179a69fd5568695dc126a7e5496f92d5e914d0d0..dff05b9eb94682267fed3a79c9ad72998ec7d053 100644
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
        /* If it isn't an extended log we can use the per cpu 64bit buffer */
        h = (struct rtas_error_log *)&savep[1];
        if (!rtas_error_extended(h)) {
-               memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
-               errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
+               memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
+               errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
        } else {
                int len, error_log_length;
 
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 365249cd346bb368d525041f15a9b2b4b1ab38e9..fe0cca4771648f22690dd8e216967798d8cf4843 100644
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
 
 void xics_teardown_cpu(void)
 {
-       struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
+       struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
        /*
         * we have to reset the cppr index to 0 because we're