arm64: atomic_lse: match asm register sizes
author Mark Rutland <mark.rutland@arm.com>
Wed, 3 May 2017 15:09:37 +0000 (16:09 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Tue, 9 May 2017 16:47:17 +0000 (17:47 +0100)
The LSE atomic code uses asm register variables to ensure that
parameters are allocated in specific registers. In the majority of cases
we specifically ask for an x register when using 64-bit values, but in a
couple of cases we use a w register for a 64-bit value.

For asm register variables, the compiler only cares about the register
index, with wN and xN having the same meaning. The compiler determines
the register size to use based on the type of the variable. Thus, this
inconsistency is merely confusing, and not harmful to code generation.
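
To illustrate the behaviour described above, here is a minimal standalone
sketch (not part of the patch; the function name reg_alias_demo is made up
for this example). With a 64-bit variable, GCC generates identical code
whether the register variable is declared against "w0" or "x0":

/*
 * The register name only pins the index (register 0 here). The operand
 * width comes from the variable's type: since r0 is a 64-bit long, %0
 * expands to x0 in the asm template either way.
 */
static inline long reg_alias_demo(long i)
{
	register long r0 asm ("w0") = i;	/* equivalent to asm ("x0") */

	asm volatile("add %0, %0, #1"
		     : "+r" (r0));		/* %0 prints as x0 */

	return r0;
}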

For consistency, this patch updates those cases to use the x register
alias. There should be no functional change as a result of this patch.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/atomic_lse.h

index 7457ce082b5ff06ad6cde7a7935f9d815e2bd256..99fa69c9c3cf3ebf080c7443fb16ac6c11cf234a 100644 (file)
@@ -322,7 +322,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
 static inline long atomic64_fetch_and##name(long i, atomic64_t *v)     \
 {                                                                      \
-       register long x0 asm ("w0") = i;                                \
+       register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
@@ -394,7 +394,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                         \
 static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)     \
 {                                                                      \
-       register long x0 asm ("w0") = i;                                \
+       register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                             \