]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
powerpc: Never giveup a reclaimed thread when enabling kernel {fp, altivec, vsx}
authorCyril Bur <cyrilbur@gmail.com>
Fri, 23 Sep 2016 06:18:11 +0000 (16:18 +1000)
committerMichael Ellerman <mpe@ellerman.id.au>
Tue, 4 Oct 2016 05:43:07 +0000 (16:43 +1100)
After a thread is reclaimed from its active or suspended transactional
state, the checkpointed state exists on the CPU; this state (along with
the live/transactional state) has been saved in its entirety by the
reclaiming process.

There exists a sequence of events that would cause the kernel to call
one of enable_kernel_fp(), enable_kernel_altivec() or
enable_kernel_vsx() after a thread has been reclaimed. These functions
save away any user state on the CPU so that the kernel can use the
registers. Not only is this saving away unnecessary at this point, it
is actually incorrect. It causes a save of the checkpointed state to
the live structures within the thread struct thus destroying the true
live state for that thread.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/process.c

index 34ee5f2e3271e7758a5b8f9c77862200dbb45ca2..45b6ea069f929a67f275074120d636b58829b6c3 100644 (file)
@@ -205,12 +205,23 @@ EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
/*
 * enable_kernel_fp - let kernel code use the floating point unit.
 *
 * Sets MSR_FP on the CPU and, if the current thread has live user FP
 * state there, gives it up (saves it to the thread struct) so the
 * kernel may clobber the FP registers. Must be called with preemption
 * disabled (WARN_ON(preemptible())).
 *
 * TM special case: if MSR[TM] is clear on the CPU but set in the
 * thread's saved MSR, the thread has already been reclaimed. The
 * checkpointed registers are on the CPU but were saved in full by the
 * reclaim code, so we must NOT giveup here — doing so would write the
 * checkpointed values over the thread's true 'live' state.
 */
void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	/* Remember the MSR as it was before we forced MSR_FP on. */
	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
@@ -257,12 +268,23 @@ EXPORT_SYMBOL(giveup_altivec);
 
/*
 * enable_kernel_altivec - let kernel code use the Altivec (VMX) unit.
 *
 * Sets MSR_VEC on the CPU and, if the current thread has live user
 * vector state there, gives it up (saves it to the thread struct) so
 * the kernel may clobber the vector registers. Must be called with
 * preemption disabled (WARN_ON(preemptible())).
 *
 * TM special case: if MSR[TM] is clear on the CPU but set in the
 * thread's saved MSR, the thread has already been reclaimed. The
 * checkpointed registers are on the CPU but were saved in full by the
 * reclaim code, so we must NOT giveup here — doing so would write the
 * checkpointed values over the thread's true 'live' state.
 */
void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	/* Remember the MSR as it was before we forced MSR_VEC on. */
	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
@@ -331,12 +353,23 @@ static void save_vsx(struct task_struct *tsk)
 
 void enable_kernel_vsx(void)
 {
+       unsigned long cpumsr;
+
        WARN_ON(preemptible());
 
-       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+       cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
                check_if_tm_restore_required(current);
+               /*
+                * If a thread has already been reclaimed then the
+                * checkpointed registers are on the CPU but have definitely
+                * been saved by the reclaim code. Don't need to and *cannot*
+                * giveup as this would save to the 'live' structure not the
+                * checkpointed structure.
+                */
+               if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+                       return;
                if (current->thread.regs->msr & MSR_FP)
                        __giveup_fpu(current);
                if (current->thread.regs->msr & MSR_VEC)