powerpc: Add the ability to save VSX without giving it up

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e8fee5e6544de0d8fc732a97aa14f45907..d7a9df51b974335a2baec308b91a485190d55011 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -133,6 +133,16 @@ void __msr_check_and_clear(unsigned long bits)
 EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
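+/*
+ * Save the task's FP state and clear MSR_FP in its user MSR. MSR_VSX
+ * is cleared as well, because the VSX registers overlap the FP
+ * registers.
+ */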
+void __giveup_fpu(struct task_struct *tsk)
+{
+       save_fpu(tsk);
+       tsk->thread.regs->msr &= ~MSR_FP;
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX))
+               tsk->thread.regs->msr &= ~MSR_VSX;
+#endif
+}
+
 void giveup_fpu(struct task_struct *tsk)
 {
        check_if_tm_restore_required(tsk);
@@ -187,9 +197,32 @@ void enable_kernel_fp(void)
        }
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+
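+/*
+ * Reload the saved FP state if this task was using it; the load_fp
+ * counter records each reload. Returns 1 when the registers were made
+ * live again.
+ */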
+static int restore_fp(struct task_struct *tsk)
+{
+       if (tsk->thread.load_fp) {
+               load_fp_state(&current->thread.fp_state);
+               current->thread.load_fp++;
+               return 1;
+       }
+       return 0;
+}
+#else
+static int restore_fp(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
+#define loadvec(thr) ((thr).load_vec)
+
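+/*
+ * Save the task's VMX state and clear MSR_VEC in its user MSR; as with
+ * FP, MSR_VSX is cleared too because the VSX registers overlap the VMX
+ * registers.
+ */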
+static void __giveup_altivec(struct task_struct *tsk)
+{
+       save_altivec(tsk);
+       tsk->thread.regs->msr &= ~MSR_VEC;
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX))
+               tsk->thread.regs->msr &= ~MSR_VSX;
+#endif
+}
+
 void giveup_altivec(struct task_struct *tsk)
 {
        check_if_tm_restore_required(tsk);
@@ -229,22 +262,49 @@ void flush_altivec_to_thread(struct task_struct *tsk)
        }
 }
 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+
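+/*
+ * Reload the saved VMX state if this task was using it recently;
+ * returns 1 when the registers were made live again.
+ */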
+static int restore_altivec(struct task_struct *tsk)
+{
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+               load_vr_state(&tsk->thread.vr_state);
+               tsk->thread.used_vr = 1;
+               tsk->thread.load_vec++;
+
+               return 1;
+       }
+       return 0;
+}
+#else
+#define loadvec(thr) 0
+static inline int restore_altivec(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void giveup_vsx(struct task_struct *tsk)
+static void __giveup_vsx(struct task_struct *tsk)
 {
-       check_if_tm_restore_required(tsk);
-
-       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        if (tsk->thread.regs->msr & MSR_FP)
                __giveup_fpu(tsk);
        if (tsk->thread.regs->msr & MSR_VEC)
                __giveup_altivec(tsk);
+       tsk->thread.regs->msr &= ~MSR_VSX;
+}
+
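+/*
+ * Wrapper around __giveup_vsx() that makes FP/VEC/VSX usable in the
+ * kernel MSR while the state is being saved.
+ */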
+static void giveup_vsx(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
        __giveup_vsx(tsk);
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(giveup_vsx);
+
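+/*
+ * In hardware the VSX registers are the FP and VMX registers viewed as
+ * a single bank, so saving the FP and VMX state saves all of the VSX
+ * state.
+ */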
+static void save_vsx(struct task_struct *tsk)
+{
+       if (tsk->thread.regs->msr & MSR_FP)
+               save_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               save_altivec(tsk);
+}
 
 void enable_kernel_vsx(void)
 {
@@ -275,6 +335,19 @@ void flush_vsx_to_thread(struct task_struct *tsk)
        }
 }
 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
+
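+/*
+ * Nothing is physically loaded here: restore_fp() and restore_altivec()
+ * already bring in the overlapping VSX state. This just marks the
+ * facility used and reports whether the CPU has VSX.
+ */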
+static int restore_vsx(struct task_struct *tsk)
+{
+       if (cpu_has_feature(CPU_FTR_VSX)) {
+               tsk->thread.used_vsr = 1;
+               return 1;
+       }
+
+       return 0;
+}
+#else
+static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+static inline void save_vsx(struct task_struct *tsk) { }
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -374,12 +447,76 @@ void giveup_all(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(giveup_all);
 
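+/*
+ * Reload any FP/VMX/VSX state that this task had live recently and set
+ * the matching bits in its user MSR, so its next access does not have
+ * to fault the state back in.
+ */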
+void restore_math(struct pt_regs *regs)
+{
+       unsigned long msr;
+
+       if (!current->thread.load_fp && !loadvec(current->thread))
+               return;
+
+       msr = regs->msr;
+       msr_check_and_set(msr_all_available);
+
+       /*
+        * Only reload if the bit is not set in the user MSR; the bit
+        * being set indicates that the registers are already hot.
+        */
+       if ((!(msr & MSR_FP)) && restore_fp(current))
+               msr |= MSR_FP | current->thread.fpexc_mode;
+
+       if ((!(msr & MSR_VEC)) && restore_altivec(current))
+               msr |= MSR_VEC;
+
+       if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
+                       restore_vsx(current)) {
+               msr |= MSR_VSX;
+       }
+
+       msr_check_and_clear(msr_all_available);
+
+       regs->msr = msr;
+}
+
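+/*
+ * Save all of the task's live math state without giving it up: the
+ * FP/VEC/VSX bits are left set in the user MSR so the registers stay
+ * hot. SPE state is still given up via __giveup_spe().
+ */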
+void save_all(struct task_struct *tsk)
+{
+       unsigned long usermsr;
+
+       if (!tsk->thread.regs)
+               return;
+
+       usermsr = tsk->thread.regs->msr;
+
+       if ((usermsr & msr_all_available) == 0)
+               return;
+
+       msr_check_and_set(msr_all_available);
+
+       /*
+        * Because of the way the register space is laid out in hardware,
+        * save_vsx() boils down to a save_fpu() plus a save_altivec().
+        */
+       if (usermsr & MSR_VSX) {
+               save_vsx(tsk);
+       } else {
+               if (usermsr & MSR_FP)
+                       save_fpu(tsk);
+
+               if (usermsr & MSR_VEC)
+                       save_altivec(tsk);
+       }
+
+       if (usermsr & MSR_SPE)
+               __giveup_spe(tsk);
+
+       msr_check_and_clear(msr_all_available);
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
                preempt_disable();
                BUG_ON(tsk != current);
-               giveup_all(tsk);
+               save_all(tsk);
 
 #ifdef CONFIG_SPE
                if (tsk->thread.regs->msr & MSR_SPE)
@@ -832,17 +969,9 @@ void restore_tm_state(struct pt_regs *regs)
 
        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
-       if (msr_diff & MSR_FP) {
-               msr_check_and_set(MSR_FP);
-               load_fp_state(&current->thread.fp_state);
-               msr_check_and_clear(MSR_FP);
-               regs->msr |= current->thread.fpexc_mode;
-       }
-       if (msr_diff & MSR_VEC) {
-               msr_check_and_set(MSR_VEC);
-               load_vr_state(&current->thread.vr_state);
-               msr_check_and_clear(MSR_VEC);
-       }
+
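+       /*
+        * restore_math() reloads whatever state is still marked live and
+        * sets the corresponding bits in regs->msr.
+        */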
+       restore_math(regs);
+
        regs->msr |= msr_diff;
 }
 
@@ -1006,6 +1135,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
                batch = this_cpu_ptr(&ppc64_tlb_batch);
                batch->active = 1;
        }
+
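+       /*
+        * Eagerly reload the incoming task's math state instead of
+        * waiting for it to take a facility-unavailable fault.
+        */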
+       if (current_thread_info()->task->thread.regs)
+               restore_math(current_thread_info()->task->thread.regs);
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
        return last;
@@ -1307,6 +1440,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
                f = ret_from_fork;
        }
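+       /*
+        * A freshly forked thread starts with FP/VMX/VSX disabled; its
+        * first use will fault the state in.
+        */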
+       childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
        sp -= STACK_FRAME_OVERHEAD;
 
        /*