powerpc: Add the ability to save VSX without giving it up
author    Cyril Bur <cyrilbur@gmail.com>
          Mon, 29 Feb 2016 06:53:51 +0000 (17:53 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
          Wed, 2 Mar 2016 12:34:50 +0000 (23:34 +1100)
This patch adds the ability to save the VSX registers to the thread
struct without giving them up, i.e. without disabling the facility for
the next time the process returns to userspace.

This patch builds on a previous optimisation for the FPU and VEC registers
in the thread copy path to avoid a possibly pointless reload of VSX state.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
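
As a minimal sketch of the distinction the commit message draws ("save"
snapshots the registers and leaves the facility enabled; "giveup"
additionally clears the MSR enable bit so the next userspace use faults
and reloads), here is an illustrative fragment. The facility_state struct
and copy_regs_out() helper are invented stand-ins, not kernel API; only
the MSR_VSX bit position matches powerpc64:

/* Illustrative sketch only -- not kernel source. */
#define MSR_VSX (1UL << 23)	/* facility-enable bit in the user MSR */

struct facility_state {
	unsigned long msr;	/* saved user MSR */
};

static void copy_regs_out(struct facility_state *t)
{
	(void)t;		/* stand-in for the stfd/stvx/stxvd2x stores */
}

/* save: snapshot the registers, leave MSR_VSX set -- no fault or
 * reload on the next return to userspace. */
static void save_sketch(struct facility_state *t)
{
	copy_regs_out(t);
}

/* giveup: snapshot the registers and clear MSR_VSX -- the next
 * userspace VSX instruction traps so the kernel can reload state. */
static void giveup_sketch(struct facility_state *t)
{
	copy_regs_out(t);
	t->msr &= ~MSR_VSX;
}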
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/vector.S

diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9028822bb73f52c25c75ea4571942b7bebc9d10e..17c8380673a60637c61fec5772162bf0ae5523cb 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -56,14 +56,10 @@ static inline void __giveup_altivec(struct task_struct *t) { }
 #ifdef CONFIG_VSX
 extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void __giveup_vsx(struct task_struct *);
 static inline void disable_kernel_vsx(void)
 {
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-#else
-static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 41e1607e800caf9ff5277f08609dab49566123e6..ef7024dacff79d47e27f576f13c2693550b9a491 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -28,10 +28,6 @@ EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
 
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(__giveup_vsx);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 14c09d25de98b68df8738286ed9469299f364a00..d7a9df51b974335a2baec308b91a485190d55011 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -280,19 +280,31 @@ static inline int restore_altivec(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void giveup_vsx(struct task_struct *tsk)
+static void __giveup_vsx(struct task_struct *tsk)
 {
-       check_if_tm_restore_required(tsk);
-
-       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        if (tsk->thread.regs->msr & MSR_FP)
                __giveup_fpu(tsk);
        if (tsk->thread.regs->msr & MSR_VEC)
                __giveup_altivec(tsk);
+       tsk->thread.regs->msr &= ~MSR_VSX;
+}
+
+static void giveup_vsx(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        __giveup_vsx(tsk);
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(giveup_vsx);
+
+static void save_vsx(struct task_struct *tsk)
+{
+       if (tsk->thread.regs->msr & MSR_FP)
+               save_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               save_altivec(tsk);
+}
 
 void enable_kernel_vsx(void)
 {
@@ -335,6 +347,7 @@ static int restore_vsx(struct task_struct *tsk)
 }
 #else
 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+static inline void save_vsx(struct task_struct *tsk) { }
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -478,14 +491,19 @@ void save_all(struct task_struct *tsk)
 
        msr_check_and_set(msr_all_available);
 
-       if (usermsr & MSR_FP)
-               save_fpu(tsk);
-
-       if (usermsr & MSR_VEC)
-               save_altivec(tsk);
+       /*
+        * Saving the way the register space is laid out in hardware,
+        * save_vsx() boils down to a save_fpu() and a save_altivec().
+        */
+       if (usermsr & MSR_VSX) {
+               save_vsx(tsk);
+       } else {
+               if (usermsr & MSR_FP)
+                       save_fpu(tsk);
 
-       if (usermsr & MSR_VSX)
-               __giveup_vsx(tsk);
+               if (usermsr & MSR_VEC)
+                       save_altivec(tsk);
+       }
 
        if (usermsr & MSR_SPE)
                __giveup_spe(tsk);
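
The comment in this hunk leans on the VSX register layout: the 64 VSX
registers alias the FP and VMX register files, so storing full-width FP
state plus VMX state leaves no VSX bits unsaved. A rough layout sketch
(illustrative only, not the kernel's thread_struct):

/* Illustrative only. One 128-bit VSX register: */
union vsr_sketch {
	unsigned long long dw[2];
};

/* VSR0-31 extend FPR0-31 to 128 bits (the FPR is doubleword 0);
 * VSR32-63 are exactly VR0-31 (Altivec). Hence save_fpu() at full
 * width plus save_altivec() covers the entire VSX register file. */
struct vsx_file_sketch {
	union vsr_sketch fp_half[32];	/* VSR[0..31]  */
	union vsr_sketch vmx_half[32];	/* VSR[32..63] */
};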
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 51b0c175ea8ce400a5fb7cbf6d103e9e3f41fdb6..1c2e7a343bf5f195cfe4b53c37171056b0554a38 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -151,23 +151,6 @@ _GLOBAL(load_up_vsx)
        std     r12,_MSR(r1)
        b       fast_exception_return
 
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- */
-_GLOBAL(__giveup_vsx)
-       addi    r3,r3,THREAD            /* want THREAD of task */
-       ld      r5,PT_REGS(r3)
-       cmpdi   0,r5,0
-       beq     1f
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r3,MSR_VSX@h
-       andc    r4,r4,r3                /* disable VSX for previous task */
-       std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-       blr
-
 #endif /* CONFIG_VSX */
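
The removed assembly never stored VSX registers; it only cleared MSR_VSX
in the task's saved user MSR, and that job now lives in the C
__giveup_vsx() above (the tsk->thread.regs->msr &= ~MSR_VSX; line). In
C, the deleted routine amounts to the following sketch (illustrative;
the sketch_* types are invented stand-ins for pt_regs/task_struct):

#define MSR_VSX (1UL << 23)

struct sketch_regs { unsigned long msr; };
struct sketch_task { struct sketch_regs *regs; };

static void giveup_vsx_asm_equiv(struct sketch_task *t)
{
	if (t->regs)				/* cmpdi 0,r5,0 ; beq 1f  */
		t->regs->msr &= ~MSR_VSX;	/* andc r4,r4,r3          */
}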