lib/find_bit.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Copyright (C) 2008 IBM Corporation
 * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howells' find_next_bit implementation)
 *
 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
 * size and improve performance, 2015.
 */

#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/swab.h>

#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
    !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
    !defined(find_next_and_bit)
/*
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for set (one) bits.
 *  - The optional "addr2", which is ANDed with "addr1" if present.
 *  - The "le" argument, set by the _le variants so the search honours
 *    little-endian bitmap layout.
 */
unsigned long _find_next_bit(const unsigned long *addr1,
                const unsigned long *addr2, unsigned long nbits,
                unsigned long start, unsigned long invert, unsigned long le)
{
        unsigned long tmp, mask;

        if (unlikely(start >= nbits))
                return nbits;

        tmp = addr1[start / BITS_PER_LONG];
        if (addr2)
                tmp &= addr2[start / BITS_PER_LONG];
        tmp ^= invert;

        /* Handle 1st word. */
        mask = BITMAP_FIRST_WORD_MASK(start);
        if (le)
                mask = swab(mask);

        tmp &= mask;

        start = round_down(start, BITS_PER_LONG);

        while (!tmp) {
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;

                tmp = addr1[start / BITS_PER_LONG];
                if (addr2)
                        tmp &= addr2[start / BITS_PER_LONG];
                tmp ^= invert;
        }

        if (le)
                tmp = swab(tmp);

        return min(start + __ffs(tmp), nbits);
}
EXPORT_SYMBOL(_find_next_bit);
#endif
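
/*
 * For context (not part of this file; the "sketch_" names are made up): the
 * generic find_next_bit(), find_next_zero_bit() and find_next_and_bit()
 * wrappers reduce to calls like the ones below. The real wrappers also carry
 * a fast path for small, compile-time-constant bitmaps, omitted here.
 */
static inline unsigned long sketch_find_next_bit(const unsigned long *addr,
                unsigned long size, unsigned long offset)
{
        /* invert = 0UL: look for bits that are already set */
        return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
}

static inline unsigned long sketch_find_next_zero_bit(const unsigned long *addr,
                unsigned long size, unsigned long offset)
{
        /* invert = ~0UL: the XOR flips each word, so clear bits look set */
        return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
}

static inline unsigned long sketch_find_next_and_bit(const unsigned long *addr1,
                const unsigned long *addr2, unsigned long size,
                unsigned long offset)
{
        /* addr2 != NULL: each fetched word is ANDed with the second bitmap */
        return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
}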

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 */
unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx])
                        return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(_find_first_bit);
#endif
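
/*
 * Caller-side illustration (not part of this file; identifiers are made up):
 * most code does not call _find_first_bit() directly but uses the
 * find_first_bit()/find_next_bit() wrappers or an iterator such as
 * for_each_set_bit(). A minimal sketch of draining a pending-work bitmap:
 */
#define SKETCH_NR_SOURCES 128

static DECLARE_BITMAP(sketch_pending, SKETCH_NR_SOURCES);

static void sketch_handle_pending(void)
{
        unsigned long bit;

        /* for_each_set_bit() is built on the find_next_bit() family */
        for_each_set_bit(bit, sketch_pending, SKETCH_NR_SOURCES) {
                clear_bit(bit, sketch_pending);
                /* ... dispatch work for source 'bit' ... */
        }
}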

#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 */
unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx] != ~0UL)
                        return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(_find_first_zero_bit);
#endif
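
/*
 * Caller-side illustration (not part of this file; identifiers are made up):
 * a common pattern built on find_first_zero_bit() is claiming a free slot in
 * an allocation bitmap, using the "returns the size when nothing is found"
 * convention. Assumes <linux/spinlock.h> and <linux/errno.h> in addition to
 * the headers included above.
 */
#define SKETCH_MAX_SLOTS 64

static DECLARE_BITMAP(sketch_slot_map, SKETCH_MAX_SLOTS);
static DEFINE_SPINLOCK(sketch_slot_lock);

static int sketch_alloc_slot(void)
{
        unsigned long slot;

        spin_lock(&sketch_slot_lock);
        slot = find_first_zero_bit(sketch_slot_map, SKETCH_MAX_SLOTS);
        if (slot == SKETCH_MAX_SLOTS) {
                spin_unlock(&sketch_slot_lock);
                return -ENOSPC;         /* every slot is in use */
        }
        set_bit(slot, sketch_slot_map); /* claim the slot */
        spin_unlock(&sketch_slot_lock);

        return slot;
}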

#ifndef find_last_bit
/*
 * Find the last (highest-numbered) set bit in a memory region.
 */
unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
{
        if (size) {
                unsigned long val = BITMAP_LAST_WORD_MASK(size);
                unsigned long idx = (size - 1) / BITS_PER_LONG;

                do {
                        val &= addr[idx];
                        if (val)
                                return idx * BITS_PER_LONG + __fls(val);

                        val = ~0ul;
                } while (idx--);
        }
        return size;
}
EXPORT_SYMBOL(_find_last_bit);
#endif

/*
 * Find the next 8-bit "clump" (byte-aligned group of eight bits) that has at
 * least one set bit, starting the search at @offset. The clump's value is
 * stored in @clump; its starting bit offset is returned, or @size if no such
 * clump exists.
 */
unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
                               unsigned long size, unsigned long offset)
{
        offset = find_next_bit(addr, size, offset);
        if (offset == size)
                return size;

        offset = round_down(offset, 8);
        *clump = bitmap_get_value8(addr, offset);

        return offset;
}
EXPORT_SYMBOL(find_next_clump8);
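
/*
 * Caller-side illustration (not part of this file; identifiers are made up):
 * find_next_clump8() backs the for_each_set_clump8() iterator in
 * <linux/bitmap.h>, which walks byte-aligned groups of eight bits containing
 * at least one set bit. Assumes <linux/printk.h> as well.
 */
static void sketch_dump_clumps(const unsigned long *bits, unsigned long nbits)
{
        unsigned long start, clump;

        /* 'start' is the clump's bit offset, 'clump' its 8-bit value */
        for_each_set_clump8(start, clump, bits, nbits)
                pr_info("bits %lu-%lu: %#02lx\n", start, start + 7, clump);
}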