#ifndef _ASM_X86_SYNC_BITOPS_H
#define _ASM_X86_SYNC_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These operations have to be done with inline assembly: that way the
 * bit-setting is guaranteed to be atomic. The test_and_*() variants
 * return 0 if the bit was clear before the operation and != 0 if it
 * was set; sync_set_bit(), sync_clear_bit() and sync_change_bit()
 * return nothing.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */

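/*
 * ADDR dereferences the enclosing function's 'addr' argument as a
 * volatile long: it gives each asm statement below a "+m" memory
 * operand, so the compiler knows the addressed bitmap word is both
 * read and written.
 */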
#define ADDR (*(volatile long *)addr)

/**
 * sync_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; bts %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}
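
/*
 * Illustrative use only; dirty_map is a made-up example bitmap, not
 * part of this header. On a 32-bit build, bit 40 is bit 8 of the
 * second word, so the lock;bts targets dirty_map[1]:
 *
 *	static volatile unsigned long dirty_map[2];
 *
 *	sync_set_bit(40, dirty_map);
 */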

/**
 * sync_clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * sync_clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; btr %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}
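
/*
 * Sketch of the barrier pattern the comment above refers to; the lock
 * word and bit number are hypothetical:
 *
 *	smp_mb__before_atomic();
 *	sync_clear_bit(MY_LOCK_BIT, &my_lock_word);
 */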

/**
 * sync_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("lock; btc %1,%0"
		     : "+m" (ADDR)
		     : "Ir" (nr)
		     : "memory");
}

/**
 * sync_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; bts %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
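
/*
 * A minimal try-lock sketch built on sync_test_and_set_bit(); the flag
 * word is hypothetical. A zero return means the bit was clear, so the
 * caller is the one who set it:
 *
 *	static volatile unsigned long flags;
 *
 *	if (!sync_test_and_set_bit(0, &flags))
 *		... bit 0 is now owned by this caller ...
 */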

/**
 * sync_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; btr %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}
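
/*
 * Typical consume-once shape for a pending flag; the event word and
 * bit are hypothetical. Only one of several racing CPUs sees the
 * non-zero return, so the event is handled exactly once:
 *
 *	if (sync_test_and_clear_bit(EVT_BIT, &pending))
 *		... handle the event ...
 */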

/**
 * sync_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	unsigned char oldbit;

	asm volatile("lock; btc %2,%1\n\tsetc %0"
		     : "=qm" (oldbit), "+m" (ADDR)
		     : "Ir" (nr) : "memory");
	return oldbit;
}

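/*
 * A read needs no lock prefix: an aligned load is already atomic on
 * x86, so the "sync" test maps straight to the regular test_bit().
 */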
#define sync_test_bit(nr, addr) test_bit(nr, addr)

#undef ADDR

#endif /* _ASM_X86_SYNC_BITOPS_H */