]> git.proxmox.com Git - ceph.git/blame - ceph/src/pmdk/src/core/util.h
import ceph 16.2.7
[ceph.git] / ceph / src / pmdk / src / core / util.h
CommitLineData
a4b75251
TL
1/* SPDX-License-Identifier: BSD-3-Clause */
2/* Copyright 2014-2020, Intel Corporation */
3/*
4 * Copyright (c) 2016-2020, Microsoft Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * * Neither the name of the copyright holder nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * util.h -- internal definitions for util module
37 */
38
39#ifndef PMDK_UTIL_H
40#define PMDK_UTIL_H 1
41
42#include <string.h>
43#include <stddef.h>
44#include <stdint.h>
45#include <stdio.h>
46#include <ctype.h>
47
48#ifdef _MSC_VER
49#include <intrin.h> /* popcnt, bitscan */
50#endif
51
52#include <sys/param.h>
53
54#ifdef __cplusplus
55extern "C" {
56#endif
57
/*
 * System page size and mmap mapping alignment -- discovered at runtime by
 * util_init(); both are powers of two, which the mask arithmetic in the
 * macros below depends on.
 */
extern unsigned long long Pagesize;
extern unsigned long long Mmap_align;

/* CPU cache line size, selected from the target architecture at compile time */
#if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__)
#define CACHELINE_SIZE 64ULL
#elif defined(__PPC64__)
#define CACHELINE_SIZE 128ULL
#else
#error unable to recognize architecture at compile time
#endif

/* page/mmap-granular rounding and tests -- valid only after util_init() runs */
#define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1))
#define PAGE_ALIGNED_UP_SIZE(size)\
	PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1))
#define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0)
#define IS_MMAP_ALIGNED(size) (((size) & (Mmap_align - 1)) == 0)
#define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr)))

/* generic rounding to an arbitrary power-of-two "align" */
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align) ((size) & ~((align) - 1))

/* pointer plus byte offset, evaluated as a void * */
#define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp)))

/* alignment requirement of type "t" (offset of a t member after one char) */
#define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m)
/* gcc/clang printf-style format-checking attribute for declarations */
#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))
83
/* one-time module initialization -- sets Pagesize/Mmap_align, etc. */
void util_init(void);
/* returns nonzero if all len bytes at addr are zero */
int util_is_zeroed(const void *addr, size_t len);
/*
 * Buffer checksum helpers -- csump points at the checksum field inside the
 * buffer being summed; skip_off presumably marks a region excluded from the
 * computation (see the definitions in util.c for the exact contract).
 */
uint64_t util_checksum_compute(void *addr, size_t len, uint64_t *csump,
	size_t skip_off);
int util_checksum(void *addr, size_t len, uint64_t *csump,
	int insert, size_t skip_off);
uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum);
/* parses a human-readable size string into *sizep; nonzero on failure */
int util_parse_size(const char *str, size_t *sizep);
char *util_fgets(char *buffer, int max, FILE *stream);
char *util_getexecname(char *path, size_t pathlen);
char *util_part_realpath(const char *path);
int util_compare_file_inodes(const char *path1, const char *path2);
/* aligned allocation -- release only with util_aligned_free() */
void *util_aligned_malloc(size_t alignment, size_t size);
void util_aligned_free(void *ptr);
struct tm *util_localtime(const time_t *timep);
int util_safe_strcpy(char *dst, const char *src, size_t max_length);
void util_emit_log(const char *lib, const char *func, int order);
/* reads one whole line from fh into a newly allocated buffer -- caller frees */
char *util_readline(FILE *fh);
int util_snprintf(char *str, size_t size,
	const char *format, ...) FORMAT_PRINTF(3, 4);

/* Windows-only UTF-8/UTF-16 conversion and error-code translation helpers */
#ifdef _WIN32
char *util_toUTF8(const wchar_t *wstr);
wchar_t *util_toUTF16(const char *wstr);
void util_free_UTF8(char *str);
void util_free_UTF16(wchar_t *str);
int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size);
int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size);
void util_suppress_errmsg(void);
int util_lasterror_to_errno(unsigned long err);
#endif

/* render errnum (or a Windows error code) as text into buff */
#define UTIL_MAX_ERR_MSG 128
void util_strerror(int errnum, char *buff, size_t bufflen);
void util_strwinerror(unsigned long err, char *buff, size_t bufflen);

/* override the allocator functions used internally by the library */
void util_set_alloc_funcs(
	void *(*malloc_func)(size_t size),
	void (*free_func)(void *ptr),
	void *(*realloc_func)(void *ptr, size_t size),
	char *(*strdup_func)(const char *s));
125
/*
 * ARRAY_SIZE -- number of elements in a static table; valid only for true
 * arrays, never for pointers (where sizeof would yield the pointer size)
 */
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

/* compiler-portability wrappers: forced inlining, noreturn, compiler barrier */
#ifdef _MSC_VER
#define force_inline inline __forceinline
#define NORETURN __declspec(noreturn)
#define barrier() _ReadWriteBarrier()
#else
#define force_inline __attribute__((always_inline)) inline
#define NORETURN __attribute__((noreturn))
#define barrier() asm volatile("" ::: "memory")
#endif

/* integer typedefs that are safe to access through unaligned pointers */
#ifdef _MSC_VER
typedef UNALIGNED uint64_t ua_uint64_t;
typedef UNALIGNED uint32_t ua_uint32_t;
typedef UNALIGNED uint16_t ua_uint16_t;
#else
typedef uint64_t ua_uint64_t __attribute__((aligned(1)));
typedef uint32_t ua_uint32_t __attribute__((aligned(1)));
typedef uint16_t ua_uint16_t __attribute__((aligned(1)));
#endif

/* clear the bits of x selected by mask */
#define util_get_not_masked_bits(x, mask) ((x) & ~(mask))
/*
 * util_setbit -- set bit i in byte array b; setbit(3) replacement that
 * avoids implicit-promotion type warnings
 */
static inline void
util_setbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(1U << (i & 7));
	b[i >> 3] = (uint8_t)(b[i >> 3] | mask);
}
163
/*
 * util_clrbit -- clear bit i in byte array b; clrbit(3) replacement that
 * avoids implicit-promotion type warnings
 */
static inline void
util_clrbit(uint8_t *b, uint32_t i)
{
	uint8_t mask = (uint8_t)(1U << (i & 7));
	b[i >> 3] = (uint8_t)(b[i >> 3] & (uint8_t)~mask);
}
172
/* single-bit tests on byte arrays -- defer to isset()/isclr() from <sys/param.h> */
#define util_isset(a, i) isset(a, i)
#define util_isclr(a, i) isclr(a, i)

/* flag-word tests: nonzero/zero depending on whether any bit of f is set in a */
#define util_flag_isset(a, f) ((a) & (f))
#define util_flag_isclr(a, f) (((a) & (f)) == 0)
178
179/*
180 * util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise
181 */
182static force_inline int
183util_is_pow2(uint64_t v)
184{
185 return v && !(v & (v - 1));
186}
187
188/*
189 * util_div_ceil -- divides a by b and rounds up the result
190 */
191static force_inline unsigned
192util_div_ceil(unsigned a, unsigned b)
193{
194 return (unsigned)(((unsigned long)a + b - 1) / b);
195}
196
/*
 * util_bool_compare_and_swap -- perform an atomic compare and swap
 * util_fetch_and_* -- perform an operation atomically, return old value
 * util_synchronize -- issue a full memory barrier
 * util_popcount -- count number of set bits
 * util_lssb_index -- return index of least significant set bit,
 *			undefined on zero
 * util_mssb_index -- return index of most significant set bit
 *			undefined on zero
 *
 * XXX assertions needed on (value != 0) in both versions of bitscans
 *
 */

#ifndef _MSC_VER
/*
 * ISO C11 -- 7.17.1.4
 * memory_order - an enumerated type whose enumerators identify memory ordering
 * constraints.
 *
 * The enumerators map directly onto the gcc/clang __ATOMIC_* constants so
 * they can be passed straight through to the __atomic_* builtins.
 */
typedef enum {
	memory_order_relaxed = __ATOMIC_RELAXED,
	memory_order_consume = __ATOMIC_CONSUME,
	memory_order_acquire = __ATOMIC_ACQUIRE,
	memory_order_release = __ATOMIC_RELEASE,
	memory_order_acq_rel = __ATOMIC_ACQ_REL,
	memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;

/*
 * ISO C11 -- 7.17.7.2 The atomic_load generic functions
 * Integer width specific versions as supplement for:
 *
 * #include <stdatomic.h>
 * C atomic_load(volatile A *object);
 * C atomic_load_explicit(volatile A *object, memory_order order);
 *
 * Unlike the standard interface, this one does not return the loaded value;
 * it copies it to a caller-specified address instead -- see the comments at
 * the MSVC version below.
 *
 * Also, instead of generic functions, two versions are available:
 * one for 32 bit fundamental integers, and one for 64 bit ones.
 */
#define util_atomic_load_explicit32 __atomic_load
#define util_atomic_load_explicit64 __atomic_load

/*
 * ISO C11 -- 7.17.7.1 The atomic_store generic functions
 * Integer width specific versions as supplement for:
 *
 * #include <stdatomic.h>
 * void atomic_store(volatile A *object, C desired);
 * void atomic_store_explicit(volatile A *object, C desired,
 *			memory_order order);
 */
#define util_atomic_store_explicit32 __atomic_store_n
#define util_atomic_store_explicit64 __atomic_store_n

/*
 * Remaining atomics and bit utilities map onto the gcc/clang builtins:
 * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
 * https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
 * https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions
 */
#define util_bool_compare_and_swap32 __sync_bool_compare_and_swap
#define util_bool_compare_and_swap64 __sync_bool_compare_and_swap
#define util_fetch_and_add32 __sync_fetch_and_add
#define util_fetch_and_add64 __sync_fetch_and_add
#define util_fetch_and_sub32 __sync_fetch_and_sub
#define util_fetch_and_sub64 __sync_fetch_and_sub
#define util_fetch_and_and32 __sync_fetch_and_and
#define util_fetch_and_and64 __sync_fetch_and_and
#define util_fetch_and_or32 __sync_fetch_and_or
#define util_fetch_and_or64 __sync_fetch_and_or
#define util_synchronize __sync_synchronize
#define util_popcount(value) ((unsigned char)__builtin_popcount(value))
#define util_popcount64(value) ((unsigned char)__builtin_popcountll(value))
#define util_lssb_index(value) ((unsigned char)__builtin_ctz(value))
#define util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value))
#define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value)))
#define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value)))
278
#else

/*
 * ISO C11 -- 7.17.1.4 memory_order substitute; MSVC has no <stdatomic.h>,
 * so plain enumerators are defined here (values are never passed to the
 * compiler, only compared inside the macros below).
 */
typedef enum {
	memory_order_relaxed,
	memory_order_consume,
	memory_order_acquire,
	memory_order_release,
	memory_order_acq_rel,
	memory_order_seq_cst
} memory_order;

/*
 * ISO C11 -- 7.17.7.2 The atomic_load generic functions
 * Integer width specific versions as supplement for:
 *
 *
 * #include <stdatomic.h>
 * C atomic_load(volatile A *object);
 * C atomic_load_explicit(volatile A *object, memory_order order);
 *
 * The atomic_load interface doesn't return the loaded value, but instead
 * copies it to a specified address.
 * The MSVC specific implementation needs to trigger a barrier (at least
 * compiler barrier) after the load from the volatile value. The actual load
 * from the volatile value itself is expected to be atomic.
 *
 * The actual interface here:
 * #include "util.h"
 * void util_atomic_load32(volatile A *object, A *destination);
 * void util_atomic_load64(volatile A *object, A *destination);
 * void util_atomic_load_explicit32(volatile A *object, A *destination,
 *		memory_order order);
 * void util_atomic_load_explicit64(volatile A *object, A *destination,
 *		memory_order order);
 */

/* these hand-rolled ports rely on x86-64 memory-ordering guarantees */
#ifndef _M_X64
#error MSVC ports of util_atomic_ only work on X86_64
#endif

/* refuse to build with untested future compiler versions */
#if _MSC_VER >= 2000
#error util_atomic_ utility functions not tested with this version of VC++
#error These utility functions are not future proof, as they are not
#error based on publicly available documentation.
#endif
325
/*
 * util_atomic_load_explicit -- MSVC atomic load: the plain volatile read is
 * assumed atomic on x86-64; acquire-or-stronger orders additionally emit a
 * compiler barrier after the load so later accesses cannot be hoisted above
 * it. Rejects store-only orders at compile time.
 */
#define util_atomic_load_explicit(object, dest, order)\
	do {\
		COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
			order != memory_order_consume &&\
			order != memory_order_acquire &&\
			order != memory_order_relaxed);\
		*dest = *object;\
		if (order == memory_order_seq_cst ||\
			order == memory_order_consume ||\
			order == memory_order_acquire)\
			_ReadWriteBarrier();\
	} while (0)

/* same code path serves both integer widths */
#define util_atomic_load_explicit32 util_atomic_load_explicit
#define util_atomic_load_explicit64 util_atomic_load_explicit
341
/* ISO C11 -- 7.17.7.1 The atomic_store generic functions */

/*
 * util_atomic_store_explicit64/32 -- MSVC atomic store: seq_cst goes through
 * an InterlockedExchange (full barrier); release emits a compiler barrier
 * before the plain volatile write; relaxed is a plain volatile write.
 * Load-only orders are rejected at compile time.
 */
#define util_atomic_store_explicit64(object, desired, order)\
	do {\
		COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
			order != memory_order_release &&\
			order != memory_order_relaxed);\
		if (order == memory_order_seq_cst) {\
			_InterlockedExchange64(\
				(volatile long long *)object, desired);\
		} else {\
			if (order == memory_order_release)\
				_ReadWriteBarrier();\
			*object = desired;\
		}\
	} while (0)

#define util_atomic_store_explicit32(object, desired, order)\
	do {\
		COMPILE_ERROR_ON(order != memory_order_seq_cst &&\
			order != memory_order_release &&\
			order != memory_order_relaxed);\
		if (order == memory_order_seq_cst) {\
			_InterlockedExchange(\
				(volatile long *)object, desired);\
		} else {\
			if (order == memory_order_release)\
				_ReadWriteBarrier();\
			*object = desired;\
		}\
	} while (0)
373
374/*
375 * https://msdn.microsoft.com/en-us/library/hh977022.aspx
376 */
377
378static __inline int
379bool_compare_and_swap32_VC(volatile LONG *ptr,
380 LONG oldval, LONG newval)
381{
382 LONG old = InterlockedCompareExchange(ptr, newval, oldval);
383 return (old == oldval);
384}
385
386static __inline int
387bool_compare_and_swap64_VC(volatile LONG64 *ptr,
388 LONG64 oldval, LONG64 newval)
389{
390 LONG64 old = InterlockedCompareExchange64(ptr, newval, oldval);
391 return (old == oldval);
392}
393
/*
 * Interlocked-based mappings of the util_ atomic helpers; like the __sync
 * builtins they replace, each util_fetch_and_* wrapper returns the value
 * the target held before the operation. 64-bit subtraction has no direct
 * intrinsic, so it adds the negated value instead.
 */
#define util_bool_compare_and_swap32(p, o, n)\
	bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n))
#define util_bool_compare_and_swap64(p, o, n)\
	bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n))
#define util_fetch_and_add32(ptr, value)\
	InterlockedExchangeAdd((LONG *)(ptr), value)
#define util_fetch_and_add64(ptr, value)\
	InterlockedExchangeAdd64((LONG64 *)(ptr), value)
#define util_fetch_and_sub32(ptr, value)\
	InterlockedExchangeSubtract((LONG *)(ptr), value)
#define util_fetch_and_sub64(ptr, value)\
	InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value)))
#define util_fetch_and_and32(ptr, value)\
	InterlockedAnd((LONG *)(ptr), value)
#define util_fetch_and_and64(ptr, value)\
	InterlockedAnd64((LONG64 *)(ptr), value)
#define util_fetch_and_or32(ptr, value)\
	InterlockedOr((LONG *)(ptr), value)
#define util_fetch_and_or64(ptr, value)\
	InterlockedOr64((LONG64 *)(ptr), value)
414
/* util_synchronize -- full memory barrier (MSVC MemoryBarrier intrinsic) */
static __inline void
util_synchronize(void)
{
	MemoryBarrier();
}

/* population count via the popcnt intrinsics from <intrin.h> */
#define util_popcount(value) (unsigned char)__popcnt(value)
#define util_popcount64(value) (unsigned char)__popcnt64(value)
423
/*
 * util_lssb_index -- index of the least significant set bit;
 * result is undefined when value == 0 (see XXX note above)
 */
static __inline unsigned char
util_lssb_index(int value)
{
	unsigned long pos;
	_BitScanForward(&pos, (unsigned long)value);
	return (unsigned char)pos;
}
431
/*
 * util_lssb_index64 -- index of the least significant set bit (64-bit);
 * result is undefined when value == 0 (see XXX note above)
 */
static __inline unsigned char
util_lssb_index64(long long value)
{
	unsigned long pos;
	_BitScanForward64(&pos, (unsigned long long)value);
	return (unsigned char)pos;
}
439
/*
 * util_mssb_index -- index of the most significant set bit;
 * result is undefined when value == 0 (see XXX note above)
 */
static __inline unsigned char
util_mssb_index(int value)
{
	unsigned long pos;
	_BitScanReverse(&pos, (unsigned long)value);
	return (unsigned char)pos;
}
447
/*
 * util_mssb_index64 -- index of the most significant set bit (64-bit);
 * result is undefined when value == 0 (see XXX note above)
 */
static __inline unsigned char
util_mssb_index64(long long value)
{
	unsigned long pos;
	_BitScanReverse64(&pos, (unsigned long long)value);
	return (unsigned char)pos;
}
455
456#endif
457
/* ISO C11 -- 7.17.7 Operations on atomic types */
/* sequentially-consistent convenience wrappers over the _explicit variants */
#define util_atomic_load32(object, dest)\
	util_atomic_load_explicit32(object, dest, memory_order_seq_cst)
#define util_atomic_load64(object, dest)\
	util_atomic_load_explicit64(object, dest, memory_order_seq_cst)

#define util_atomic_store32(object, desired)\
	util_atomic_store_explicit32(object, desired, memory_order_seq_cst)
#define util_atomic_store64(object, desired)\
	util_atomic_store_explicit64(object, desired, memory_order_seq_cst)
468
/*
 * util_get_printable_ascii -- map non-printable characters to a dot '.'
 */
static inline char
util_get_printable_ascii(char c)
{
	if (isprint((unsigned char)c))
		return c;

	return '.';
}
477
/* newly allocated concatenation of s1 and s2 -- caller frees */
char *util_concat_str(const char *s1, const char *s2);

/* branch-prediction hints; plain boolean evaluation without __builtin_expect */
#if !defined(likely)
#if defined(__GNUC__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (!!(x))
#define unlikely(x) (!!(x))
#endif
#endif

/*
 * Compile-time assertions; the condition must be a compile-time constant.
 * Both variants are disabled entirely under sparse (__CHECKER__).
 */
#if defined(__CHECKER__)
#define COMPILE_ERROR_ON(cond)
#define ASSERT_COMPILE_ERROR_ON(cond)
#elif defined(_MSC_VER)
#define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond))
/* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */
#define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0)
#else
#define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1]))
#define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond)
#endif

/* run a function automatically at load/unload (gcc/clang attributes) */
#ifndef _MSC_VER
#define ATTR_CONSTRUCTOR __attribute__((constructor)) static
#define ATTR_DESTRUCTOR __attribute__((destructor)) static
#else
#define ATTR_CONSTRUCTOR
#define ATTR_DESTRUCTOR
#endif

/*
 * CONSTRUCTOR(fun) -- portable "run fun() before main()" marker; the MSVC
 * C++ path relies on a static object whose constructor calls fun().
 */
#ifndef _MSC_VER
#define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR
#else
#ifdef __cplusplus
#define CONSTRUCTOR(fun) \
void fun(); \
struct _##fun { \
	_##fun() { \
		fun(); \
	} \
}; static _##fun foo; \
static
#else
#define CONSTRUCTOR(fun) \
	MSVC_CONSTR(fun) \
	static
#endif
#endif

/* compile-time check that two functions have compatible types (gcc only) */
#ifdef __GNUC__
#define CHECK_FUNC_COMPATIBLE(func1, func2)\
	COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\
		typeof(func2)))
#else
#define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0)
#endif /* __GNUC__ */
536
537#ifdef __cplusplus
538}
539#endif
540
541#endif /* util.h */