/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 */

#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif

/*
 * When this is enabled, the LASSERT() macro also catches an assignment
 * written where an equality check was intended, at the cost of losing the
 * unlikely() annotation.  Turn it on from time to time for test builds;
 * it should not be enabled in a production release.
 */
#define LASSERT_CHECKED (0)

#define LASSERTF(cond, fmt, ...) \
do { \
        if (unlikely(!(cond))) { \
                LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
                libcfs_debug_msg(&__msg_data, \
                                 "ASSERTION( %s ) failed: " fmt, #cond, \
                                 ## __VA_ARGS__); \
                lbug_with_loc(&__msg_data); \
        } \
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")
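
/*
 * Example (illustrative sketch, not part of the original header): typical
 * assertion call sites.  'ni' and 'count' are hypothetical variables.
 *
 *      LASSERT(ni != NULL);
 *      LASSERTF(count >= 0, "count: %d\n", count);
 */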

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that one does not want enabled all the
 * time.  LINVRNT() has to be explicitly enabled by the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof(!!(exp)))
#endif

#define KLASSERT(e) LASSERT(e)

void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);

#define LBUG() \
do { \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
        lbug_with_loc(&msgdata); \
} while (0)

#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif

#define LIBCFS_ALLOC_PRE(size, mask) \
do { \
        LASSERT(!in_interrupt() || \
                ((size) <= LIBCFS_VMALLOC_SIZE && \
                 !gfpflags_allow_blocking(mask))); \
} while (0)

#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
        if (unlikely((ptr) == NULL)) { \
                CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
                       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
        } else { \
                memset((ptr), 0, (size)); \
        } \
} while (0)

/**
 * allocate memory with GFP flags @mask
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
do { \
        LIBCFS_ALLOC_PRE((size), (mask)); \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
                kmalloc((size), (mask)) : vmalloc(size); \
        LIBCFS_ALLOC_POST((ptr), (size)); \
} while (0)

/**
 * default allocator
 */
#define LIBCFS_ALLOC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)

/**
 * allocate memory for the specified CPU partition:
 *   if \a cptab != NULL, \a cpt is a CPU partition id within \a cptab;
 *   if \a cptab == NULL, \a cpt is a HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
do { \
        LIBCFS_ALLOC_PRE((size), (mask)); \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
                kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) : \
                vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); \
        LIBCFS_ALLOC_POST((ptr), (size)); \
} while (0)

/** default NUMA allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
        LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
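
/*
 * Example (illustrative sketch, not part of the original header): allocating
 * a per-partition object near the memory of CPU partition \a cpt.  'struct
 * lnet_net' and 'cptab' are hypothetical names.
 *
 *      struct lnet_net *net;
 *
 *      LIBCFS_CPT_ALLOC(net, cptab, cpt, sizeof(*net));
 *      if (net == NULL)
 *              return -ENOMEM;
 */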

#define LIBCFS_FREE(ptr, size) \
do { \
        int s = (size); \
        if (unlikely((ptr) == NULL)) { \
                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at %s:%d\n", \
                       s, __FILE__, __LINE__); \
                break; \
        } \
        if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
                vfree(ptr); \
        else \
                kfree(ptr); \
} while (0)
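
/*
 * Example (illustrative sketch, not part of the original header): the usual
 * allocate/use/free pattern.  LIBCFS_FREE() must be given the same size that
 * was passed to LIBCFS_ALLOC(), since the size selects between kfree() and
 * vfree().  'struct foo' is a hypothetical type.
 *
 *      struct foo *obj;
 *
 *      LIBCFS_ALLOC(obj, sizeof(*obj));
 *      if (obj == NULL)
 *              return -ENOMEM;
 *      ...
 *      LIBCFS_FREE(obj, sizeof(*obj));
 */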

/******************************************************************************/

/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif

void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

/*
 * Allocate per-CPU-partition data; the returned value is an array of
 * pointers that can be indexed by CPU (partition) id:
 *   cptab != NULL: the array has one entry per CPU partition
 *   cptab == NULL: the array has one entry per HW core
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy a per-CPU-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

#define cfs_percpt_for_each(var, i, vars) \
        for (i = 0; i < cfs_percpt_number(vars) && \
                    ((var) = (vars)[i]) != NULL; i++)
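
/*
 * Example (illustrative sketch, not part of the original header): allocating
 * per-partition counters and walking them.  'struct my_counter' and 'cptab'
 * are hypothetical.
 *
 *      struct my_counter **cnts;
 *      struct my_counter *cnt;
 *      int i;
 *
 *      cnts = cfs_percpt_alloc(cptab, sizeof(*cnt));
 *      if (cnts == NULL)
 *              return -ENOMEM;
 *
 *      cfs_percpt_for_each(cnt, i, cnts)
 *              cnt->mc_value = 0;
 *
 *      cfs_percpt_free(cnts);
 */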

/*
 * Allocate a variable-size array; the returned value is an array of
 * pointers.  The caller specifies the array length with \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);

#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v) \
do { \
        LASSERTF(atomic_read(a) == v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v) \
do { \
        LASSERTF(atomic_read(a) != v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v) \
do { \
        LASSERTF(atomic_read(a) < v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v) \
do { \
        LASSERTF(atomic_read(a) <= v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v) \
do { \
        LASSERTF(atomic_read(a) > v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v) \
do { \
        LASSERTF(atomic_read(a) >= v, \
                 "value: %d\n", atomic_read((a))); \
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
do { \
        int __v = atomic_read(a); \
        LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
do { \
        int __v = atomic_read(a); \
        LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
do { \
        int __v = atomic_read(a); \
        LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
do { \
        int __v = atomic_read(a); \
        LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
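
/*
 * Example (illustrative sketch, not part of the original header): guarding a
 * reference count.  'obj', 'o_refcount' and MAX_REFS are hypothetical.
 *
 *      LASSERT_ATOMIC_POS(&obj->o_refcount);
 *      LASSERT_ATOMIC_GT_LT(&obj->o_refcount, 0, MAX_REFS);
 */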

#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))

/*
 * percpu partition lock
 *
 * There are some use-cases like this in Lustre:
 * . each CPU partition has its own private data, which is frequently
 *   changed, and mostly by the local CPU partition.
 * . all CPU partitions share some global data; these data are rarely
 *   changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data just takes the private lock
 * . a read of shared data takes _any_ one of the private locks
 * . a change to shared data takes _all_ private locks,
 *   which is slow and should be really rare.
 */

enum {
        CFS_PERCPT_LOCK_EX = -1, /* negative */
};

struct cfs_percpt_lock {
        /* cpu-partition-table for this lock */
        struct cfs_cpt_table *pcl_cptab;
        /* exclusively locked */
        unsigned int pcl_locked;
        /* private lock table */
        spinlock_t **pcl_locks;
};

/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
        return cfs_cpt_number(pcl->pcl_cptab);
}

/*
 * create a cpu-partition lock based on CPU partition table \a cptab;
 * each CPU partition gets its own private lock
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
/* create percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);
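
/*
 * Example (illustrative sketch, not part of the original header): taking the
 * lock of a single partition for a private update, or CFS_PERCPT_LOCK_EX to
 * take every private lock before touching shared data.  'pcl' and 'cpt' are
 * assumed to exist.
 *
 *      cfs_percpt_lock(pcl, cpt);                  // private data of 'cpt'
 *      ...
 *      cfs_percpt_unlock(pcl, cpt);
 *
 *      cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);   // shared data (rare)
 *      ...
 *      cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */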

/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold.  \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *      6.8.4.2  The switch statement
 *      ....
 *      [#3] The expression of each case label shall be an integer
 *      constant expression and no two of the case constant
 *      expressions in the same switch statement shall have the same
 *      value after conversion...
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)
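
/*
 * Example (illustrative sketch, not part of the original header): a failed
 * CLASSERT() duplicates the 'case 0:' label, which is a constraint
 * violation, so the build breaks.
 *
 *      CLASSERT(sizeof(u64) == 8);     // compiles
 *      CLASSERT(sizeof(u64) == 4);     // compile error: duplicate case
 */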

/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
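
/*
 * Example (illustrative sketch, not part of the original header): ergo() and
 * equi() read naturally inside assertions.  'resending', 'req', 'locked' and
 * 'lock' are hypothetical.
 *
 *      LASSERT(ergo(resending, req != NULL));  // resending implies a req
 *      LASSERT(equi(locked, lock != NULL));    // locked iff lock is set
 */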

/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

struct libcfs_device_userstate {
        int ldu_memhog_pages;
        struct page *ldu_memhog_root_page;
};

#define MKSTR(ptr) ((ptr) ? (ptr) : "")

static inline int cfs_size_round4(int val)
{
        return (val + 3) & (~0x3);
}

#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
        return (val + 7) & (~0x7);
}

#define HAVE_CFS_SIZE_ROUND
#endif

static inline int cfs_size_round16(int val)
{
        return (val + 0xf) & (~0xf);
}

static inline int cfs_size_round32(int val)
{
        return (val + 0x1f) & (~0x1f);
}

static inline int cfs_size_round0(int val)
{
        if (!val)
                return 0;
        return (val + 1 + 7) & (~0x7);
}

static inline size_t cfs_round_strlen(char *fset)
{
        return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
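
/*
 * Example (illustrative): cfs_size_round() rounds up to the next multiple of
 * 8, so cfs_size_round(1) == 8, cfs_size_round(8) == 8 and
 * cfs_size_round(9) == 16.  cfs_size_round0() also reserves one byte for a
 * terminator before rounding, so cfs_size_round0(8) == 16, while
 * cfs_size_round0(0) == 0.
 */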

#define LOGL(var, len, ptr) \
do { \
        if (var) \
                memcpy((char *)ptr, (const char *)var, len); \
        ptr += cfs_size_round(len); \
} while (0)

#define LOGU(var, len, ptr) \
do { \
        if (var) \
                memcpy((char *)var, (const char *)ptr, len); \
        ptr += cfs_size_round(len); \
} while (0)

#define LOGL0(var, len, ptr) \
do { \
        if (!len) \
                break; \
        memcpy((char *)ptr, (const char *)var, len); \
        *((char *)(ptr) + len) = 0; \
        ptr += cfs_size_round(len + 1); \
} while (0)
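
/*
 * Example (illustrative sketch, not part of the original header): packing a
 * counted string into a buffer at rounded offsets, then unpacking it on the
 * receiving side.  'buf', 'name', 'name_out' and 'namelen' are hypothetical.
 *
 *      char *p = buf;
 *
 *      LOGL(name, namelen, p);         // copy out, advance by rounded len
 *
 *      p = buf;
 *      LOGU(name_out, namelen, p);     // copy back in from the buffer
 */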

#endif /* __LIBCFS_PRIVATE_H__ */