// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include "util/coding_lean.h"
#include "util/math.h"

#ifdef TEST_UINT128_COMPAT
#undef HAVE_UINT128_EXTENSION
#endif

namespace ROCKSDB_NAMESPACE {

// Unsigned128 is a 128-bit value supporting (at least) bitwise operators,
// shifts, and comparisons. __uint128_t is not always available.

#ifdef HAVE_UINT128_EXTENSION
using Unsigned128 = __uint128_t;
#else
struct Unsigned128 {
  uint64_t lo;
  uint64_t hi;

  inline Unsigned128() {
    static_assert(sizeof(Unsigned128) == 2 * sizeof(uint64_t),
                  "unexpected overhead in representation");
    lo = 0;
    hi = 0;
  }

  inline Unsigned128(uint64_t lower) {
    lo = lower;
    hi = 0;
  }

  inline Unsigned128(uint64_t lower, uint64_t upper) {
    lo = lower;
    hi = upper;
  }

  explicit operator uint64_t() { return lo; }

  explicit operator uint32_t() { return static_cast<uint32_t>(lo); }

  explicit operator uint16_t() { return static_cast<uint16_t>(lo); }

  explicit operator uint8_t() { return static_cast<uint8_t>(lo); }
};

inline Unsigned128 operator<<(const Unsigned128& lhs, unsigned shift) {
  shift &= 127;
  Unsigned128 rv;
  if (shift >= 64) {
    rv.lo = 0;
    rv.hi = lhs.lo << (shift & 63);
  } else {
    uint64_t tmp = lhs.lo;
    rv.lo = tmp << shift;
    // Ensure shift==0 shifts away everything. (This avoids another
    // conditional branch on shift == 0.)
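    // For example, with shift == 0, "tmp >> 1 >> 63" below is always zero,
    // so rv.hi == lhs.hi; writing "tmp >> (64 - shift)" instead would be
    // undefined behavior when shift == 0.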
    tmp = tmp >> 1 >> (63 - shift);
    rv.hi = tmp | (lhs.hi << shift);
  }
  return rv;
}

inline Unsigned128& operator<<=(Unsigned128& lhs, unsigned shift) {
  lhs = lhs << shift;
  return lhs;
}

inline Unsigned128 operator>>(const Unsigned128& lhs, unsigned shift) {
  shift &= 127;
  Unsigned128 rv;
  if (shift >= 64) {
    rv.hi = 0;
    rv.lo = lhs.hi >> (shift & 63);
  } else {
    uint64_t tmp = lhs.hi;
    rv.hi = tmp >> shift;
    // Ensure shift==0 shifts away everything
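    // (same branch-free trick as in operator<< above)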
    tmp = tmp << 1 << (63 - shift);
    rv.lo = tmp | (lhs.lo >> shift);
  }
  return rv;
}

inline Unsigned128& operator>>=(Unsigned128& lhs, unsigned shift) {
  lhs = lhs >> shift;
  return lhs;
}

inline Unsigned128 operator&(const Unsigned128& lhs, const Unsigned128& rhs) {
  return Unsigned128(lhs.lo & rhs.lo, lhs.hi & rhs.hi);
}

inline Unsigned128& operator&=(Unsigned128& lhs, const Unsigned128& rhs) {
  lhs = lhs & rhs;
  return lhs;
}

inline Unsigned128 operator|(const Unsigned128& lhs, const Unsigned128& rhs) {
  return Unsigned128(lhs.lo | rhs.lo, lhs.hi | rhs.hi);
}

inline Unsigned128& operator|=(Unsigned128& lhs, const Unsigned128& rhs) {
  lhs = lhs | rhs;
  return lhs;
}

inline Unsigned128 operator^(const Unsigned128& lhs, const Unsigned128& rhs) {
  return Unsigned128(lhs.lo ^ rhs.lo, lhs.hi ^ rhs.hi);
}

inline Unsigned128& operator^=(Unsigned128& lhs, const Unsigned128& rhs) {
  lhs = lhs ^ rhs;
  return lhs;
}

inline Unsigned128 operator~(const Unsigned128& v) {
  return Unsigned128(~v.lo, ~v.hi);
}

inline bool operator==(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.lo == rhs.lo && lhs.hi == rhs.hi;
}

inline bool operator!=(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.lo != rhs.lo || lhs.hi != rhs.hi;
}

inline bool operator>(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.hi > rhs.hi || (lhs.hi == rhs.hi && lhs.lo > rhs.lo);
}

inline bool operator<(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.hi < rhs.hi || (lhs.hi == rhs.hi && lhs.lo < rhs.lo);
}

inline bool operator>=(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.hi > rhs.hi || (lhs.hi == rhs.hi && lhs.lo >= rhs.lo);
}

inline bool operator<=(const Unsigned128& lhs, const Unsigned128& rhs) {
  return lhs.hi < rhs.hi || (lhs.hi == rhs.hi && lhs.lo <= rhs.lo);
}
#endif

inline uint64_t Lower64of128(Unsigned128 v) {
#ifdef HAVE_UINT128_EXTENSION
  return static_cast<uint64_t>(v);
#else
  return v.lo;
#endif
}

inline uint64_t Upper64of128(Unsigned128 v) {
#ifdef HAVE_UINT128_EXTENSION
  return static_cast<uint64_t>(v >> 64);
#else
  return v.hi;
#endif
}
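
// Usage sketch: assembling a 128-bit value from two halves and taking it
// apart again works the same with either representation:
//   Unsigned128 v =
//       (Unsigned128{0x0123456789abcdefU} << 64) | 0xfedcba9876543210U;
//   assert(Upper64of128(v) == 0x0123456789abcdefU);
//   assert(Lower64of128(v) == 0xfedcba9876543210U);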

// This generally compiles down to a single fast instruction on 64-bit.
// This doesn't really make sense as operator* because it's not a
// general 128x128 multiply and provides more output than 64x64 multiply.
inline Unsigned128 Multiply64to128(uint64_t a, uint64_t b) {
#ifdef HAVE_UINT128_EXTENSION
  return Unsigned128{a} * Unsigned128{b};
#else
  // Full decomposition
  // NOTE: GCC seems to fully understand this code as 64-bit x 64-bit
  // -> 128-bit multiplication and optimize it appropriately.
  uint64_t tmp = uint64_t{b & 0xffffFFFF} * uint64_t{a & 0xffffFFFF};
  uint64_t lower = tmp & 0xffffFFFF;
  tmp >>= 32;
  tmp += uint64_t{b & 0xffffFFFF} * uint64_t{a >> 32};
  // Avoid overflow: first add lower 32 of tmp2, and later upper 32
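  // (After the addition above, tmp <= (2^32-2) + (2^32-1)^2; adding another
  // value below 2^32 therefore stays below 2^64, so uint64_t cannot wrap.)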
  uint64_t tmp2 = uint64_t{b >> 32} * uint64_t{a & 0xffffFFFF};
  tmp += tmp2 & 0xffffFFFF;
  lower |= tmp << 32;
  tmp >>= 32;
  tmp += tmp2 >> 32;
  tmp += uint64_t{b >> 32} * uint64_t{a >> 32};
  return Unsigned128(lower, tmp);
#endif
}
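
// Sanity-check example: Multiply64to128(UINT64_MAX, UINT64_MAX) is
// (2^64-1)^2 == 2^128 - 2^65 + 1, so Upper64of128 of the result is
// 0xfffffffffffffffe and Lower64of128 is 0x0000000000000001.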

template <>
inline int FloorLog2(Unsigned128 v) {
  if (Upper64of128(v) == 0) {
    return FloorLog2(Lower64of128(v));
  } else {
    return FloorLog2(Upper64of128(v)) + 64;
  }
}

template <>
inline int CountTrailingZeroBits(Unsigned128 v) {
  if (Lower64of128(v) != 0) {
    return CountTrailingZeroBits(Lower64of128(v));
  } else {
    return CountTrailingZeroBits(Upper64of128(v)) + 64;
  }
}
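
// For example, for v == Unsigned128{1} << 100, both FloorLog2(v) and
// CountTrailingZeroBits(v) are 100: the only set bit lives in the upper
// half, so each delegates to the 64-bit version and adds 64.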

template <>
inline int BitsSetToOne(Unsigned128 v) {
  return BitsSetToOne(Lower64of128(v)) + BitsSetToOne(Upper64of128(v));
}

template <>
inline int BitParity(Unsigned128 v) {
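  // Bits set in both halves cancel in pairs, so the parity of the full
  // 128-bit popcount equals the parity of popcount(lo ^ hi).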
  return BitParity(Lower64of128(v) ^ Upper64of128(v));
}

template <>
inline Unsigned128 EndianSwapValue(Unsigned128 v) {
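  // Byte-reversing 128 bits == byte-reverse each 64-bit half and exchange
  // the halves (the swapped lower half becomes the upper half).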
  return (Unsigned128{EndianSwapValue(Lower64of128(v))} << 64) |
         EndianSwapValue(Upper64of128(v));
}

template <>
inline Unsigned128 ReverseBits(Unsigned128 v) {
  return (Unsigned128{ReverseBits(Lower64of128(v))} << 64) |
         ReverseBits(Upper64of128(v));
}

template <>
inline Unsigned128 DownwardInvolution(Unsigned128 v) {
  return (Unsigned128{DownwardInvolution(Upper64of128(v))} << 64) |
         DownwardInvolution(Upper64of128(v) ^ Lower64of128(v));
}

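// True iff T is a built-in unsigned integer type or Unsigned128; handy for
// constraining generic code to "unsigned, up to 128 bits".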
template <typename T>
struct IsUnsignedUpTo128
    : std::integral_constant<bool, std::is_unsigned<T>::value ||
                                       std::is_same<T, Unsigned128>::value> {};

inline void EncodeFixed128(char* dst, Unsigned128 value) {
  EncodeFixed64(dst, Lower64of128(value));
  EncodeFixed64(dst + 8, Upper64of128(value));
}

inline Unsigned128 DecodeFixed128(const char* ptr) {
  Unsigned128 rv = DecodeFixed64(ptr + 8);
  return (rv << 64) | DecodeFixed64(ptr);
}
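
// Layout note: the lower 64 bits occupy dst[0..7] and the upper 64 bits
// dst[8..15]; since EncodeFixed64 (coding_lean.h) writes little-endian,
// the whole 128-bit value is stored little-endian, and DecodeFixed128
// reverses the process exactly.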

// A version of EncodeFixed* for generic algorithms. Likely to be used
// with Unsigned128, so lives here for now.
template <typename T>
inline void EncodeFixedGeneric(char* /*dst*/, T /*value*/) {
  // Unfortunately, GCC does not appear to optimize this simple code down
  // to a trivial load on Intel:
  //
  // T ret_val = 0;
  // for (size_t i = 0; i < sizeof(T); ++i) {
  //   ret_val |= (static_cast<T>(static_cast<unsigned char>(ptr[i]))
  //               << (8 * i));
  // }
  // return ret_val;
  //
  // But it does unroll the loop, and it does optimize a manually unrolled
  // version for specific sizes down to a trivial load. I have no idea why
  // it doesn't do both on this code.

  // So instead, we rely on specializations.
  static_assert(sizeof(T) == 0, "No specialization provided for this type");
}

template <>
inline void EncodeFixedGeneric(char* dst, uint16_t value) {
  return EncodeFixed16(dst, value);
}
template <>
inline void EncodeFixedGeneric(char* dst, uint32_t value) {
  return EncodeFixed32(dst, value);
}
template <>
inline void EncodeFixedGeneric(char* dst, uint64_t value) {
  return EncodeFixed64(dst, value);
}
template <>
inline void EncodeFixedGeneric(char* dst, Unsigned128 value) {
  return EncodeFixed128(dst, value);
}
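
// Usage sketch (hypothetical helper, not part of this header): a generic
// serializer can pick the right fixed-width encoder by type:
//   template <typename T>
//   void PutFixed(std::string* dst, T value) {
//     char buf[sizeof(T)];
//     EncodeFixedGeneric(buf, value);
//     dst->append(buf, sizeof(T));
//   }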

// A version of DecodeFixed* for generic algorithms.
template <typename T>
inline T DecodeFixedGeneric(const char* /*dst*/) {
  static_assert(sizeof(T) == 0, "No specialization provided for this type");
}

template <>
inline uint16_t DecodeFixedGeneric(const char* dst) {
  return DecodeFixed16(dst);
}
template <>
inline uint32_t DecodeFixedGeneric(const char* dst) {
  return DecodeFixed32(dst);
}
template <>
inline uint64_t DecodeFixedGeneric(const char* dst) {
  return DecodeFixed64(dst);
}
template <>
inline Unsigned128 DecodeFixedGeneric(const char* dst) {
  return DecodeFixed128(dst);
}

}  // namespace ROCKSDB_NAMESPACE