/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 */

#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

/* XXX this layering violation is for nidstrings */
#include "../lnet/types.h"

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif

/*
 * When this is enabled, the LASSERT macro checks for an assignment used in
 * place of an equality test, but omits unlikely(). Enable it from time to
 * time for test builds; it should not be enabled in a production release.
 */
#define LASSERT_CHECKED (0)

#define LASSERTF(cond, fmt, ...)                                        \
do {                                                                    \
        if (unlikely(!(cond))) {                                        \
                LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);  \
                libcfs_debug_msg(&__msg_data,                           \
                                 "ASSERTION( %s ) failed: " fmt, #cond, \
                                 ## __VA_ARGS__);                       \
                lbug_with_loc(&__msg_data);                             \
        }                                                               \
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")

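/*
 * Usage sketch (illustrative only, not part of the original header; the
 * struct and field names are hypothetical):
 *
 *      static void foo_check(struct foo *f)
 *      {
 *              LASSERT(f != NULL);
 *              LASSERTF(f->f_refcount > 0, "refcount: %d\n", f->f_refcount);
 *      }
 *
 * On failure both macros log "ASSERTION( <cond> ) failed:" at D_EMERG
 * level and then call lbug_with_loc(), which does not return.
 */
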
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that are not meant to be enabled all
 * the time. LINVRNT() has to be explicitly enabled by the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif

#define KLASSERT(e) LASSERT(e)

void lbug_with_loc(struct libcfs_debug_msg_data *) __attribute__((noreturn));

#define LBUG()                                                  \
do {                                                            \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);     \
        lbug_with_loc(&msgdata);                                \
} while (0)

extern atomic_t libcfs_kmemory;
/*
 * Memory
 */

# define libcfs_kmem_inc(ptr, size)             \
do {                                            \
        atomic_add(size, &libcfs_kmemory);      \
} while (0)

# define libcfs_kmem_dec(ptr, size)             \
do {                                            \
        atomic_sub(size, &libcfs_kmemory);      \
} while (0)

# define libcfs_kmem_read()     \
        atomic_read(&libcfs_kmemory)

#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif

#define LIBCFS_ALLOC_PRE(size, mask)                                    \
do {                                                                    \
        LASSERT(!in_interrupt() ||                                      \
                ((size) <= LIBCFS_VMALLOC_SIZE &&                       \
                 ((mask) & __GFP_WAIT) == 0));                          \
} while (0)

#define LIBCFS_ALLOC_POST(ptr, size)                                    \
do {                                                                    \
        if (unlikely((ptr) == NULL)) {                                  \
                CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
                       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
                CERROR("LNET: %d total bytes allocated by lnet\n",      \
                       libcfs_kmem_read());                             \
        } else {                                                        \
                memset((ptr), 0, (size));                               \
                libcfs_kmem_inc((ptr), (size));                         \
                CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n", \
                       (int)(size), (ptr), libcfs_kmem_read());         \
        }                                                               \
} while (0)

/**
 * allocate memory with GFP flags @mask
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)                               \
do {                                                                    \
        LIBCFS_ALLOC_PRE((size), (mask));                               \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?                         \
                kmalloc((size), (mask)) : vmalloc(size);                \
        LIBCFS_ALLOC_POST((ptr), (size));                               \
} while (0)

/**
 * default allocator
 */
#define LIBCFS_ALLOC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)

/**
 * allocate memory for a specified CPU partition:
 *   if \a cptab != NULL, \a cpt is a CPU partition id within \a cptab;
 *   if \a cptab == NULL, \a cpt is a HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)               \
do {                                                                    \
        LIBCFS_ALLOC_PRE((size), (mask));                               \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?                         \
                kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
                vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));    \
        LIBCFS_ALLOC_POST((ptr), (size));                               \
} while (0)

/** default NUMA allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
        LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)

#define LIBCFS_FREE(ptr, size)                                          \
do {                                                                    \
        int s = (size);                                                 \
        if (unlikely((ptr) == NULL)) {                                  \
                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
                       "%s:%d\n", s, __FILE__, __LINE__);               \
                break;                                                  \
        }                                                               \
        libcfs_kmem_dec((ptr), s);                                      \
        CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",     \
               s, (ptr), libcfs_kmem_read());                           \
        if (unlikely(s > LIBCFS_VMALLOC_SIZE))                          \
                vfree(ptr);                                             \
        else                                                            \
                kfree(ptr);                                             \
} while (0)

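/*
 * Usage sketch (illustrative only, not part of the original header; the
 * struct name is hypothetical):
 *
 *      struct foo *fp;
 *
 *      LIBCFS_ALLOC(fp, sizeof(*fp));
 *      if (fp == NULL)
 *              return -ENOMEM;
 *      ...
 *      LIBCFS_FREE(fp, sizeof(*fp));
 *
 * The size passed to LIBCFS_FREE() must match the allocation size: it
 * selects between kfree() and vfree() and adjusts the libcfs_kmemory
 * accounting. Memory returned by LIBCFS_ALLOC() is already zeroed by
 * LIBCFS_ALLOC_POST().
 */
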
/******************************************************************************/

/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif

void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

void libcfs_debug_set_level(unsigned int debug_level);

/*
 * Allocate per-CPU-partition data; the returned value is an array of
 * pointers that can be indexed by CPU partition id:
 *   cptab != NULL: the array length is the number of CPU partitions
 *   cptab == NULL: the array length is the number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy a per-CPU-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

#define cfs_percpt_for_each(var, i, vars)               \
        for (i = 0; i < cfs_percpt_number(vars) &&      \
                    ((var) = (vars)[i]) != NULL; i++)

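/*
 * Usage sketch (illustrative only, not part of the original header; the
 * counter struct is hypothetical):
 *
 *      struct foo_counter {
 *              long fc_count;
 *      };
 *      struct foo_counter **counters;
 *      struct foo_counter *c;
 *      int i;
 *
 *      counters = cfs_percpt_alloc(cptab, sizeof(**counters));
 *      if (counters == NULL)
 *              return -ENOMEM;
 *
 *      cfs_percpt_for_each(c, i, counters)
 *              c->fc_count = 0;
 *
 *      cfs_percpt_free(counters);
 */
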
/*
 * Allocate a variable-length array; the returned value is an array of
 * pointers. The caller specifies the array length with \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);

#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) == v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) != v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) < v,                    \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) <= v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) > v,                    \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) >= v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)                         \
do {                                                            \
        int __v = atomic_read(a);                               \
        LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);     \
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)                         \
do {                                                            \
        int __v = atomic_read(a);                               \
        LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);    \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)                         \
do {                                                            \
        int __v = atomic_read(a);                               \
        LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);    \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than or
 *  equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)                         \
do {                                                            \
        int __v = atomic_read(a);                               \
        LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);   \
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)         do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a)          LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)           LASSERT_ATOMIC_GT(a, 0)

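/*
 * Usage sketch (illustrative only, not part of the original header; the
 * refcount below is hypothetical):
 *
 *      atomic_t obj_refcount;
 *
 *      LASSERT_ATOMIC_POS(&obj_refcount);              (refcount > 0)
 *      LASSERT_ATOMIC_GT_LE(&obj_refcount, 0, 100);    (0 < refcount <= 100)
 */
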
#define CFS_ALLOC_PTR(ptr)      LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)       LIBCFS_FREE(ptr, sizeof(*(ptr)))

/*
 * per-CPU-partition lock
 *
 * There are some use-cases like this in Lustre:
 * . each CPU partition has its own private data which is frequently
 *   changed, and mostly by the local CPU partition.
 * . all CPU partitions share some global data, which is rarely changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data just needs to take the private lock
 * . a read of shared data just needs to take _any_ of the private locks
 * . a change to shared data needs to take _all_ private locks,
 *   which is slow and should be really rare.
 */

enum {
        CFS_PERCPT_LOCK_EX = -1,        /* negative */
};

struct cfs_percpt_lock {
        /* cpu-partition-table for this lock */
        struct cfs_cpt_table    *pcl_cptab;
        /* exclusively locked */
        unsigned int            pcl_locked;
        /* private lock table */
        spinlock_t              **pcl_locks;
};

/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
        return cfs_cpt_number(pcl->pcl_cptab);
}

/*
 * create a cpu-partition lock based on CPU partition table \a cptab;
 * one private spinlock is created for each CPU partition in \a cptab
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
/* create percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);

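/*
 * Usage sketch (illustrative only, not part of the original header;
 * assumes cfs_cpt_current() from the libcfs CPT API):
 *
 *      struct cfs_percpt_lock *plock = cfs_percpt_lock_alloc(cptab);
 *      int cpt;
 *
 *      if (plock == NULL)
 *              return -ENOMEM;
 *
 *      cpt = cfs_cpt_current(cptab, 1);
 *      cfs_percpt_lock(plock, cpt);    (change partition-private data)
 *      cfs_percpt_unlock(plock, cpt);
 *
 *      cfs_percpt_lock(plock, CFS_PERCPT_LOCK_EX);     (change shared data)
 *      cfs_percpt_unlock(plock, CFS_PERCPT_LOCK_EX);
 *
 *      cfs_percpt_lock_free(plock);
 */
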
/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *      6.8.4.2 The switch statement
 *      ....
 *      [#3] The expression of each case label shall be an integer
 *      constant expression and no two of the case constant
 *      expressions in the same switch statement shall have the same
 *      value after conversion...
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)

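/*
 * Example (illustrative only): when \a cond is true, "case (cond)" and
 * "case 0" are distinct labels and the statement compiles to nothing;
 * when \a cond is false, both labels are 0 and the compiler rejects the
 * duplicate case value:
 *
 *      CLASSERT(sizeof(__u64) == 8);   (compiles)
 *      CLASSERT(sizeof(__u64) == 4);   (compile error: duplicate case)
 */
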
/* support declarations needed by both the kernel and liblustre */
int libcfs_isknown_lnd(int type);
char *libcfs_lnd2modname(int type);
char *libcfs_lnd2str(int type);
int libcfs_str2lnd(const char *str);
char *libcfs_net2str(__u32 net);
char *libcfs_nid2str(lnet_nid_t nid);
__u32 libcfs_str2net(const char *str);
lnet_nid_t libcfs_str2nid(const char *str);
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
char *libcfs_id2str(lnet_process_id_t id);
void cfs_free_nidlist(struct list_head *list);
int cfs_parse_nidlist(char *str, int len, struct list_head *list);
int cfs_match_nid(lnet_nid_t nid, struct list_head *list);

/** \addtogroup lnet_addr
 * @{ */
/* how an LNET NID encodes net:address */
/** extract the address part of an lnet_nid_t */
#define LNET_NIDADDR(nid)       ((__u32)((nid) & 0xffffffff))
/** extract the network part of an lnet_nid_t */
#define LNET_NIDNET(nid)        ((__u32)(((nid) >> 32)) & 0xffffffff)
/** make an lnet_nid_t from a network part and an address part */
#define LNET_MKNID(net, addr)   ((((__u64)(net)) << 32) | ((__u64)(addr)))
/* how net encodes type:number */
#define LNET_NETNUM(net)        ((net) & 0xffff)
#define LNET_NETTYP(net)        (((net) >> 16) & 0xffff)
#define LNET_MKNET(typ, num)    ((((__u32)(typ)) << 16) | ((__u32)(num)))
/** @} lnet_addr */

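/*
 * Worked example (illustrative only): an lnet_nid_t packs net:address into
 * 64 bits, and the net part in turn packs type:number into 32 bits:
 *
 *      __u32 net = LNET_MKNET(SOCKLND, 3);
 *      lnet_nid_t nid = LNET_MKNID(net, addr);
 *
 *      LNET_NIDNET(nid) == net
 *      LNET_NIDADDR(nid) == addr
 *      LNET_NETTYP(net) == SOCKLND
 *      LNET_NETNUM(net) == 3
 *
 * (SOCKLND is defined in the LND type enum at the end of this header.)
 */
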
/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))

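/*
 * Example (illustrative only): both are typically used inside assertions,
 * e.g. "if the buffer pointer is set, its length must be positive":
 *
 *      LASSERT(ergo(buf != NULL, len > 0));
 *      LASSERT(equi(list_empty(&queue), queued_count == 0));
 */
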
/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

struct libcfs_device_userstate {
        int              ldu_memhog_pages;
        struct page     *ldu_memhog_root_page;
};

#define MKSTR(ptr) ((ptr) ? (ptr) : "")

static inline int cfs_size_round4(int val)
{
        return (val + 3) & (~0x3);
}

#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
        return (val + 7) & (~0x7);
}

#define HAVE_CFS_SIZE_ROUND
#endif

static inline int cfs_size_round16(int val)
{
        return (val + 0xf) & (~0xf);
}

static inline int cfs_size_round32(int val)
{
        return (val + 0x1f) & (~0x1f);
}

static inline int cfs_size_round0(int val)
{
        if (!val)
                return 0;
        return (val + 1 + 7) & (~0x7);
}

static inline size_t cfs_round_strlen(char *fset)
{
        return (size_t)cfs_size_round((int)strlen(fset) + 1);
}

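/*
 * Worked example (illustrative only): each helper rounds a length up to
 * the next multiple of 4, 8, 16 or 32 bytes respectively:
 *
 *      cfs_size_round4(5)  == 8
 *      cfs_size_round(5)   == 8
 *      cfs_size_round16(5) == 16
 *      cfs_size_round32(5) == 32
 *      cfs_round_strlen("abc") == 8    (strlen + 1 = 4, rounded up to 8)
 */
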
/* round up \a val to the next power of 2 */
static inline unsigned int cfs_power2_roundup(unsigned int val)
{
        if (val != LOWEST_BIT_SET(val)) { /* not a power of 2 already */
                do {
                        val &= ~LOWEST_BIT_SET(val);
                } while (val != LOWEST_BIT_SET(val));
                /* ...and round up */
                val <<= 1;
        }
        return val;
}

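/*
 * Worked example (illustrative only):
 *
 *      cfs_power2_roundup(1) == 1      (already a power of 2)
 *      cfs_power2_roundup(5) == 8
 *      cfs_power2_roundup(8) == 8
 *      cfs_power2_roundup(9) == 16
 */
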
#define LOGL(var, len, ptr)                                     \
do {                                                            \
        if (var)                                                \
                memcpy((char *)ptr, (const char *)var, len);    \
        ptr += cfs_size_round(len);                             \
} while (0)

#define LOGU(var, len, ptr)                                     \
do {                                                            \
        if (var)                                                \
                memcpy((char *)var, (const char *)ptr, len);    \
        ptr += cfs_size_round(len);                             \
} while (0)

#define LOGL0(var, len, ptr)                                    \
do {                                                            \
        if (!len)                                               \
                break;                                          \
        memcpy((char *)ptr, (const char *)var, len);            \
        *((char *)(ptr) + len) = 0;                             \
        ptr += cfs_size_round(len + 1);                         \
} while (0)

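/*
 * Usage sketch (illustrative only, not part of the original header): pack
 * two strings into a buffer at rounded offsets with LOGL, then read them
 * back with LOGU using the same lengths:
 *
 *      char *p = buf;
 *
 *      LOGL(name, name_len, p);
 *      LOGL(addr, addr_len, p);
 *
 *      p = buf;
 *      LOGU(name_out, name_len, p);
 *      LOGU(addr_out, addr_len, p);
 */
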
/**
 * Lustre Network Driver types.
 */
enum {
        /* Only add to these values (i.e. don't ever change or redefine
         * them): network addresses depend on them... */
        QSWLND          = 1,
        SOCKLND         = 2,
        GMLND           = 3,    /* obsolete, keep it so that libcfs_nid2str works */
        PTLLND          = 4,
        O2IBLND         = 5,
        CIBLND          = 6,
        OPENIBLND       = 7,
        IIBLND          = 8,
        LOLND           = 9,
        RALND           = 10,
        VIBLND          = 11,
        MXLND           = 12,
        GNILND          = 13,
};


#endif /* __LIBCFS_PRIVATE_H__ */