/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <asm/byteorder.h>

/*
 * Bitfield access macros
 *
 * The FIELD_{GET,PREP} macros take a shifted mask as their first
 * parameter and extract the base mask and shift amount from it.
 * The mask must be a compile-time constant.
 *
 * Example:
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *	  FIELD_PREP(REG_FIELD_B, 0) |
 *	  FIELD_PREP(REG_FIELD_C, c) |
 *	  FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */

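/*
 * __bf_shf() computes the shift amount of a field, i.e. the bit position
 * of the lowest set bit of the shifted mask; for example,
 * __bf_shf(GENMASK(15, 8)) evaluates to 8.
 */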
#define __bf_shf(x) (__builtin_ffsll(x) - 1)

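/*
 * __BF_FIELD_CHECK() performs compile-time sanity checks for the macros
 * below: the mask must be a non-zero compile-time constant that fits in
 * the register type, a constant value must fit within the field, and the
 * mask must be contiguous (the mask plus its lowest set bit must be a
 * power of two, e.g. 0x0ff0 + 0x0010 == 0x1000).
 */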
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})

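/*
 * Usage sketch (illustrative only, reusing the REG_FIELD_C example mask
 * from the comment at the top of this file):
 *
 *	if (!FIELD_FIT(REG_FIELD_C, c))
 *		return -EINVAL;
 */
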
/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value. The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg:  value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
	})

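/*
 * Round-trip sketch (illustrative values only; REG_FIELD_C is the
 * GENMASK(15, 8) example mask from the comment at the top of this file):
 *
 *	u32 reg = FIELD_PREP(REG_FIELD_C, 0xab);
 *	u32 c = FIELD_GET(REG_FIELD_C, reg);
 *
 * Here reg ends up as 0xab00 and c as 0xab.
 */
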
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
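/*
 * Helpers for the typed accessors generated below. field_multiplier()
 * returns the lowest set bit of the mask (used as a multiplier in place
 * of a shift) and triggers a compile-time error for non-contiguous masks;
 * field_mask() returns the mask shifted down to bit 0. For example, with
 * field == 0xff00, field_multiplier() is 0x100 and field_mask() is 0xff.
 */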
static __always_inline u64 field_multiplier(u64 field)
{
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;
}
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
	____MAKE_OP(u##size,u##size,,)
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP

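/*
 * The macros above generate typed helpers such as u32_encode_bits(),
 * u32_get_bits(), le32_encode_bits() and le32p_replace_bits(), covering
 * u8 plus the u/le/be 16/32/64 variants. A usage sketch (illustrative
 * values only):
 *
 *	u32 reg = u32_encode_bits(0xab, 0xff00);
 *	u32 val = u32_get_bits(reg, 0xff00);
 *
 * Here reg ends up as 0xab00 and val as 0xab. The le/be variants take
 * and return __le/__be types and convert byte order internally.
 */
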
#endif