]>
Commit | Line | Data |
---|---|---|
d7e09d03 PT |
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * lnet/include/lnet/lib-lnet.h | |
37 | * | |
38 | * Top level include for library side routines | |
39 | */ | |
40 | ||
41 | #ifndef __LNET_LIB_LNET_H__ | |
42 | #define __LNET_LIB_LNET_H__ | |
43 | ||
44 | #include <linux/lnet/linux/lib-lnet.h> | |
45 | ||
46 | #include <linux/libcfs/libcfs.h> | |
47 | #include <linux/lnet/types.h> | |
48 | #include <linux/lnet/lnet.h> | |
49 | #include <linux/lnet/lib-types.h> | |
50 | ||
51 | extern lnet_t the_lnet; /* THE network */ | |
52 | ||
53 | #if defined(LNET_USE_LIB_FREELIST) | |
54 | /* 1 CPT, simplify implementation... */ | |
55 | # define LNET_CPT_MAX_BITS 0 | |
56 | ||
57 | #else /* KERNEL and no freelist */ | |
58 | ||
59 | # if (BITS_PER_LONG == 32) | |
60 | /* 2 CPTs, allowing more CPTs might make us under memory pressure */ | |
61 | # define LNET_CPT_MAX_BITS 1 | |
62 | ||
63 | # else /* 64-bit system */ | |
64 | /* | |
65 | * 256 CPTs for thousands of CPUs, allowing more CPTs might make us | |
66 | * under risk of consuming all lh_cookie. | |
67 | */ | |
68 | # define LNET_CPT_MAX_BITS 8 | |
69 | # endif /* BITS_PER_LONG == 32 */ | |
70 | #endif | |
71 | ||
72 | /* max allowed CPT number */ | |
73 | #define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS) | |
74 | ||
75 | #define LNET_CPT_NUMBER (the_lnet.ln_cpt_number) | |
76 | #define LNET_CPT_BITS (the_lnet.ln_cpt_bits) | |
77 | #define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1) | |
78 | ||
79 | /** exclusive lock */ | |
80 | #define LNET_LOCK_EX CFS_PERCPT_LOCK_EX | |
81 | ||
82 | static inline int lnet_is_wire_handle_none (lnet_handle_wire_t *wh) | |
83 | { | |
84 | return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE && | |
85 | wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE); | |
86 | } | |
87 | ||
88 | static inline int lnet_md_exhausted (lnet_libmd_t *md) | |
89 | { | |
90 | return (md->md_threshold == 0 || | |
91 | ((md->md_options & LNET_MD_MAX_SIZE) != 0 && | |
92 | md->md_offset + md->md_max_size > md->md_length)); | |
93 | } | |
94 | ||
95 | static inline int lnet_md_unlinkable (lnet_libmd_t *md) | |
96 | { | |
97 | /* Should unlink md when its refcount is 0 and either: | |
98 | * - md has been flagged for deletion (by auto unlink or LNetM[DE]Unlink, | |
99 | * in the latter case md may not be exhausted). | |
100 | * - auto unlink is on and md is exhausted. | |
101 | */ | |
102 | if (md->md_refcount != 0) | |
103 | return 0; | |
104 | ||
105 | if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0) | |
106 | return 1; | |
107 | ||
108 | return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 && | |
109 | lnet_md_exhausted(md)); | |
110 | } | |
111 | ||
112 | #define lnet_cpt_table() (the_lnet.ln_cpt_table) | |
113 | #define lnet_cpt_current() cfs_cpt_current(the_lnet.ln_cpt_table, 1) | |
114 | ||
115 | static inline int | |
116 | lnet_cpt_of_cookie(__u64 cookie) | |
117 | { | |
118 | unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK; | |
119 | ||
120 | /* LNET_CPT_NUMBER doesn't have to be power2, which means we can | |
121 | * get illegal cpt from it's invalid cookie */ | |
122 | return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER; | |
123 | } | |
124 | ||
125 | static inline void | |
126 | lnet_res_lock(int cpt) | |
127 | { | |
128 | cfs_percpt_lock(the_lnet.ln_res_lock, cpt); | |
129 | } | |
130 | ||
131 | static inline void | |
132 | lnet_res_unlock(int cpt) | |
133 | { | |
134 | cfs_percpt_unlock(the_lnet.ln_res_lock, cpt); | |
135 | } | |
136 | ||
137 | static inline int | |
138 | lnet_res_lock_current(void) | |
139 | { | |
140 | int cpt = lnet_cpt_current(); | |
141 | ||
142 | lnet_res_lock(cpt); | |
143 | return cpt; | |
144 | } | |
145 | ||
146 | static inline void | |
147 | lnet_net_lock(int cpt) | |
148 | { | |
149 | cfs_percpt_lock(the_lnet.ln_net_lock, cpt); | |
150 | } | |
151 | ||
152 | static inline void | |
153 | lnet_net_unlock(int cpt) | |
154 | { | |
155 | cfs_percpt_unlock(the_lnet.ln_net_lock, cpt); | |
156 | } | |
157 | ||
158 | static inline int | |
159 | lnet_net_lock_current(void) | |
160 | { | |
161 | int cpt = lnet_cpt_current(); | |
162 | ||
163 | lnet_net_lock(cpt); | |
164 | return cpt; | |
165 | } | |
166 | ||
167 | #define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX) | |
168 | #define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX) | |
169 | ||
170 | ||
171 | #define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock) | |
172 | #define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock) | |
173 | #define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock) | |
174 | #define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock) | |
175 | #define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock) | |
176 | #define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock) | |
177 | #define LNET_MUTEX_LOCK(m) mutex_lock(m) | |
178 | #define LNET_MUTEX_UNLOCK(m) mutex_unlock(m) | |
179 | ||
180 | ||
181 | #define MAX_PORTALS 64 | |
182 | ||
183 | /* these are only used by code with LNET_USE_LIB_FREELIST, but we still | |
184 | * exported them to !LNET_USE_LIB_FREELIST for easy implemetation */ | |
185 | #define LNET_FL_MAX_MES 2048 | |
186 | #define LNET_FL_MAX_MDS 2048 | |
187 | #define LNET_FL_MAX_EQS 512 | |
188 | #define LNET_FL_MAX_MSGS 2048 /* Outstanding messages */ | |
189 | ||
190 | #ifdef LNET_USE_LIB_FREELIST | |
191 | ||
192 | int lnet_freelist_init(lnet_freelist_t *fl, int n, int size); | |
193 | void lnet_freelist_fini(lnet_freelist_t *fl); | |
194 | ||
195 | static inline void * | |
196 | lnet_freelist_alloc (lnet_freelist_t *fl) | |
197 | { | |
198 | /* ALWAYS called with liblock held */ | |
199 | lnet_freeobj_t *o; | |
200 | ||
201 | if (list_empty (&fl->fl_list)) | |
202 | return (NULL); | |
203 | ||
204 | o = list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list); | |
205 | list_del (&o->fo_list); | |
206 | return ((void *)&o->fo_contents); | |
207 | } | |
208 | ||
209 | static inline void | |
210 | lnet_freelist_free (lnet_freelist_t *fl, void *obj) | |
211 | { | |
212 | /* ALWAYS called with liblock held */ | |
213 | lnet_freeobj_t *o = list_entry (obj, lnet_freeobj_t, fo_contents); | |
214 | ||
215 | list_add (&o->fo_list, &fl->fl_list); | |
216 | } | |
217 | ||
218 | ||
219 | static inline lnet_eq_t * | |
220 | lnet_eq_alloc (void) | |
221 | { | |
222 | /* NEVER called with resource lock held */ | |
223 | struct lnet_res_container *rec = &the_lnet.ln_eq_container; | |
224 | lnet_eq_t *eq; | |
225 | ||
226 | LASSERT(LNET_CPT_NUMBER == 1); | |
227 | ||
228 | lnet_res_lock(0); | |
229 | eq = (lnet_eq_t *)lnet_freelist_alloc(&rec->rec_freelist); | |
230 | lnet_res_unlock(0); | |
231 | ||
232 | return eq; | |
233 | } | |
234 | ||
235 | static inline void | |
236 | lnet_eq_free_locked(lnet_eq_t *eq) | |
237 | { | |
238 | /* ALWAYS called with resource lock held */ | |
239 | struct lnet_res_container *rec = &the_lnet.ln_eq_container; | |
240 | ||
241 | LASSERT(LNET_CPT_NUMBER == 1); | |
242 | lnet_freelist_free(&rec->rec_freelist, eq); | |
243 | } | |
244 | ||
245 | static inline void | |
246 | lnet_eq_free(lnet_eq_t *eq) | |
247 | { | |
248 | lnet_res_lock(0); | |
249 | lnet_eq_free_locked(eq); | |
250 | lnet_res_unlock(0); | |
251 | } | |
252 | ||
253 | static inline lnet_libmd_t * | |
254 | lnet_md_alloc (lnet_md_t *umd) | |
255 | { | |
256 | /* NEVER called with resource lock held */ | |
257 | struct lnet_res_container *rec = the_lnet.ln_md_containers[0]; | |
258 | lnet_libmd_t *md; | |
259 | ||
260 | LASSERT(LNET_CPT_NUMBER == 1); | |
261 | ||
262 | lnet_res_lock(0); | |
263 | md = (lnet_libmd_t *)lnet_freelist_alloc(&rec->rec_freelist); | |
264 | lnet_res_unlock(0); | |
265 | ||
266 | if (md != NULL) | |
267 | INIT_LIST_HEAD(&md->md_list); | |
268 | ||
269 | return md; | |
270 | } | |
271 | ||
272 | static inline void | |
273 | lnet_md_free_locked(lnet_libmd_t *md) | |
274 | { | |
275 | /* ALWAYS called with resource lock held */ | |
276 | struct lnet_res_container *rec = the_lnet.ln_md_containers[0]; | |
277 | ||
278 | LASSERT(LNET_CPT_NUMBER == 1); | |
279 | lnet_freelist_free(&rec->rec_freelist, md); | |
280 | } | |
281 | ||
282 | static inline void | |
283 | lnet_md_free(lnet_libmd_t *md) | |
284 | { | |
285 | lnet_res_lock(0); | |
286 | lnet_md_free_locked(md); | |
287 | lnet_res_unlock(0); | |
288 | } | |
289 | ||
290 | static inline lnet_me_t * | |
291 | lnet_me_alloc(void) | |
292 | { | |
293 | /* NEVER called with resource lock held */ | |
294 | struct lnet_res_container *rec = the_lnet.ln_me_containers[0]; | |
295 | lnet_me_t *me; | |
296 | ||
297 | LASSERT(LNET_CPT_NUMBER == 1); | |
298 | ||
299 | lnet_res_lock(0); | |
300 | me = (lnet_me_t *)lnet_freelist_alloc(&rec->rec_freelist); | |
301 | lnet_res_unlock(0); | |
302 | ||
303 | return me; | |
304 | } | |
305 | ||
306 | static inline void | |
307 | lnet_me_free_locked(lnet_me_t *me) | |
308 | { | |
309 | /* ALWAYS called with resource lock held */ | |
310 | struct lnet_res_container *rec = the_lnet.ln_me_containers[0]; | |
311 | ||
312 | LASSERT(LNET_CPT_NUMBER == 1); | |
313 | lnet_freelist_free(&rec->rec_freelist, me); | |
314 | } | |
315 | ||
316 | static inline void | |
317 | lnet_me_free(lnet_me_t *me) | |
318 | { | |
319 | lnet_res_lock(0); | |
320 | lnet_me_free_locked(me); | |
321 | lnet_res_unlock(0); | |
322 | } | |
323 | ||
324 | static inline lnet_msg_t * | |
325 | lnet_msg_alloc (void) | |
326 | { | |
327 | /* NEVER called with network lock held */ | |
328 | struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0]; | |
329 | lnet_msg_t *msg; | |
330 | ||
331 | LASSERT(LNET_CPT_NUMBER == 1); | |
332 | ||
333 | lnet_net_lock(0); | |
334 | msg = (lnet_msg_t *)lnet_freelist_alloc(&msc->msc_freelist); | |
335 | lnet_net_unlock(0); | |
336 | ||
337 | if (msg != NULL) { | |
338 | /* NULL pointers, clear flags etc */ | |
339 | memset(msg, 0, sizeof(*msg)); | |
340 | } | |
341 | return msg; | |
342 | } | |
343 | ||
344 | static inline void | |
345 | lnet_msg_free_locked(lnet_msg_t *msg) | |
346 | { | |
347 | /* ALWAYS called with network lock held */ | |
348 | struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0]; | |
349 | ||
350 | LASSERT(LNET_CPT_NUMBER == 1); | |
351 | LASSERT(!msg->msg_onactivelist); | |
352 | lnet_freelist_free(&msc->msc_freelist, msg); | |
353 | } | |
354 | ||
355 | static inline void | |
356 | lnet_msg_free (lnet_msg_t *msg) | |
357 | { | |
358 | lnet_net_lock(0); | |
359 | lnet_msg_free_locked(msg); | |
360 | lnet_net_unlock(0); | |
361 | } | |
362 | ||
363 | #else /* !LNET_USE_LIB_FREELIST */ | |
364 | ||
365 | static inline lnet_eq_t * | |
366 | lnet_eq_alloc (void) | |
367 | { | |
368 | /* NEVER called with liblock held */ | |
369 | lnet_eq_t *eq; | |
370 | ||
371 | LIBCFS_ALLOC(eq, sizeof(*eq)); | |
372 | return (eq); | |
373 | } | |
374 | ||
375 | static inline void | |
376 | lnet_eq_free(lnet_eq_t *eq) | |
377 | { | |
378 | /* ALWAYS called with resource lock held */ | |
379 | LIBCFS_FREE(eq, sizeof(*eq)); | |
380 | } | |
381 | ||
382 | static inline lnet_libmd_t * | |
383 | lnet_md_alloc (lnet_md_t *umd) | |
384 | { | |
385 | /* NEVER called with liblock held */ | |
386 | lnet_libmd_t *md; | |
387 | unsigned int size; | |
388 | unsigned int niov; | |
389 | ||
390 | if ((umd->options & LNET_MD_KIOV) != 0) { | |
391 | niov = umd->length; | |
392 | size = offsetof(lnet_libmd_t, md_iov.kiov[niov]); | |
393 | } else { | |
394 | niov = ((umd->options & LNET_MD_IOVEC) != 0) ? | |
395 | umd->length : 1; | |
396 | size = offsetof(lnet_libmd_t, md_iov.iov[niov]); | |
397 | } | |
398 | ||
399 | LIBCFS_ALLOC(md, size); | |
400 | ||
401 | if (md != NULL) { | |
402 | /* Set here in case of early free */ | |
403 | md->md_options = umd->options; | |
404 | md->md_niov = niov; | |
405 | INIT_LIST_HEAD(&md->md_list); | |
406 | } | |
407 | ||
408 | return (md); | |
409 | } | |
410 | ||
411 | static inline void | |
412 | lnet_md_free(lnet_libmd_t *md) | |
413 | { | |
414 | /* ALWAYS called with resource lock held */ | |
415 | unsigned int size; | |
416 | ||
417 | if ((md->md_options & LNET_MD_KIOV) != 0) | |
418 | size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]); | |
419 | else | |
420 | size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]); | |
421 | ||
422 | LIBCFS_FREE(md, size); | |
423 | } | |
424 | ||
425 | static inline lnet_me_t * | |
426 | lnet_me_alloc (void) | |
427 | { | |
428 | /* NEVER called with liblock held */ | |
429 | lnet_me_t *me; | |
430 | ||
431 | LIBCFS_ALLOC(me, sizeof(*me)); | |
432 | return (me); | |
433 | } | |
434 | ||
435 | static inline void | |
436 | lnet_me_free(lnet_me_t *me) | |
437 | { | |
438 | /* ALWAYS called with resource lock held */ | |
439 | LIBCFS_FREE(me, sizeof(*me)); | |
440 | } | |
441 | ||
442 | static inline lnet_msg_t * | |
443 | lnet_msg_alloc(void) | |
444 | { | |
445 | /* NEVER called with liblock held */ | |
446 | lnet_msg_t *msg; | |
447 | ||
448 | LIBCFS_ALLOC(msg, sizeof(*msg)); | |
449 | ||
450 | /* no need to zero, LIBCFS_ALLOC does for us */ | |
451 | return (msg); | |
452 | } | |
453 | ||
454 | static inline void | |
455 | lnet_msg_free(lnet_msg_t *msg) | |
456 | { | |
457 | /* ALWAYS called with network lock held */ | |
458 | LASSERT(!msg->msg_onactivelist); | |
459 | LIBCFS_FREE(msg, sizeof(*msg)); | |
460 | } | |
461 | ||
462 | #define lnet_eq_free_locked(eq) lnet_eq_free(eq) | |
463 | #define lnet_md_free_locked(md) lnet_md_free(md) | |
464 | #define lnet_me_free_locked(me) lnet_me_free(me) | |
465 | #define lnet_msg_free_locked(msg) lnet_msg_free(msg) | |
466 | ||
467 | #endif /* LNET_USE_LIB_FREELIST */ | |
468 | ||
469 | lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec, | |
470 | __u64 cookie); | |
471 | void lnet_res_lh_initialize(struct lnet_res_container *rec, | |
472 | lnet_libhandle_t *lh); | |
473 | static inline void | |
474 | lnet_res_lh_invalidate(lnet_libhandle_t *lh) | |
475 | { | |
476 | /* ALWAYS called with resource lock held */ | |
477 | /* NB: cookie is still useful, don't reset it */ | |
478 | list_del(&lh->lh_hash_chain); | |
479 | } | |
480 | ||
481 | static inline void | |
482 | lnet_eq2handle (lnet_handle_eq_t *handle, lnet_eq_t *eq) | |
483 | { | |
484 | if (eq == NULL) { | |
485 | LNetInvalidateHandle(handle); | |
486 | return; | |
487 | } | |
488 | ||
489 | handle->cookie = eq->eq_lh.lh_cookie; | |
490 | } | |
491 | ||
492 | static inline lnet_eq_t * | |
493 | lnet_handle2eq(lnet_handle_eq_t *handle) | |
494 | { | |
495 | /* ALWAYS called with resource lock held */ | |
496 | lnet_libhandle_t *lh; | |
497 | ||
498 | lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie); | |
499 | if (lh == NULL) | |
500 | return NULL; | |
501 | ||
502 | return lh_entry(lh, lnet_eq_t, eq_lh); | |
503 | } | |
504 | ||
505 | static inline void | |
506 | lnet_md2handle (lnet_handle_md_t *handle, lnet_libmd_t *md) | |
507 | { | |
508 | handle->cookie = md->md_lh.lh_cookie; | |
509 | } | |
510 | ||
511 | static inline lnet_libmd_t * | |
512 | lnet_handle2md(lnet_handle_md_t *handle) | |
513 | { | |
514 | /* ALWAYS called with resource lock held */ | |
515 | lnet_libhandle_t *lh; | |
516 | int cpt; | |
517 | ||
518 | cpt = lnet_cpt_of_cookie(handle->cookie); | |
519 | lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], | |
520 | handle->cookie); | |
521 | if (lh == NULL) | |
522 | return NULL; | |
523 | ||
524 | return lh_entry(lh, lnet_libmd_t, md_lh); | |
525 | } | |
526 | ||
527 | static inline lnet_libmd_t * | |
528 | lnet_wire_handle2md(lnet_handle_wire_t *wh) | |
529 | { | |
530 | /* ALWAYS called with resource lock held */ | |
531 | lnet_libhandle_t *lh; | |
532 | int cpt; | |
533 | ||
534 | if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie) | |
535 | return NULL; | |
536 | ||
537 | cpt = lnet_cpt_of_cookie(wh->wh_object_cookie); | |
538 | lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], | |
539 | wh->wh_object_cookie); | |
540 | if (lh == NULL) | |
541 | return NULL; | |
542 | ||
543 | return lh_entry(lh, lnet_libmd_t, md_lh); | |
544 | } | |
545 | ||
546 | static inline void | |
547 | lnet_me2handle (lnet_handle_me_t *handle, lnet_me_t *me) | |
548 | { | |
549 | handle->cookie = me->me_lh.lh_cookie; | |
550 | } | |
551 | ||
552 | static inline lnet_me_t * | |
553 | lnet_handle2me(lnet_handle_me_t *handle) | |
554 | { | |
555 | /* ALWAYS called with resource lock held */ | |
556 | lnet_libhandle_t *lh; | |
557 | int cpt; | |
558 | ||
559 | cpt = lnet_cpt_of_cookie(handle->cookie); | |
560 | lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt], | |
561 | handle->cookie); | |
562 | if (lh == NULL) | |
563 | return NULL; | |
564 | ||
565 | return lh_entry(lh, lnet_me_t, me_lh); | |
566 | } | |
567 | ||
568 | static inline void | |
569 | lnet_peer_addref_locked(lnet_peer_t *lp) | |
570 | { | |
571 | LASSERT (lp->lp_refcount > 0); | |
572 | lp->lp_refcount++; | |
573 | } | |
574 | ||
575 | extern void lnet_destroy_peer_locked(lnet_peer_t *lp); | |
576 | ||
577 | static inline void | |
578 | lnet_peer_decref_locked(lnet_peer_t *lp) | |
579 | { | |
580 | LASSERT (lp->lp_refcount > 0); | |
581 | lp->lp_refcount--; | |
582 | if (lp->lp_refcount == 0) | |
583 | lnet_destroy_peer_locked(lp); | |
584 | } | |
585 | ||
586 | static inline int | |
587 | lnet_isrouter(lnet_peer_t *lp) | |
588 | { | |
589 | return lp->lp_rtr_refcount != 0; | |
590 | } | |
591 | ||
592 | static inline void | |
593 | lnet_ni_addref_locked(lnet_ni_t *ni, int cpt) | |
594 | { | |
595 | LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER); | |
596 | LASSERT(*ni->ni_refs[cpt] >= 0); | |
597 | ||
598 | (*ni->ni_refs[cpt])++; | |
599 | } | |
600 | ||
601 | static inline void | |
602 | lnet_ni_addref(lnet_ni_t *ni) | |
603 | { | |
604 | lnet_net_lock(0); | |
605 | lnet_ni_addref_locked(ni, 0); | |
606 | lnet_net_unlock(0); | |
607 | } | |
608 | ||
609 | static inline void | |
610 | lnet_ni_decref_locked(lnet_ni_t *ni, int cpt) | |
611 | { | |
612 | LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER); | |
613 | LASSERT(*ni->ni_refs[cpt] > 0); | |
614 | ||
615 | (*ni->ni_refs[cpt])--; | |
616 | } | |
617 | ||
618 | static inline void | |
619 | lnet_ni_decref(lnet_ni_t *ni) | |
620 | { | |
621 | lnet_net_lock(0); | |
622 | lnet_ni_decref_locked(ni, 0); | |
623 | lnet_net_unlock(0); | |
624 | } | |
625 | ||
626 | void lnet_ni_free(lnet_ni_t *ni); | |
627 | ||
628 | static inline int | |
629 | lnet_nid2peerhash(lnet_nid_t nid) | |
630 | { | |
631 | return cfs_hash_long(nid, LNET_PEER_HASH_BITS); | |
632 | } | |
633 | ||
634 | static inline struct list_head * | |
635 | lnet_net2rnethash(__u32 net) | |
636 | { | |
637 | return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) + | |
638 | LNET_NETTYP(net)) & | |
639 | ((1U << the_lnet.ln_remote_nets_hbits) - 1)]; | |
640 | } | |
641 | ||
642 | extern lnd_t the_lolnd; | |
643 | ||
644 | ||
645 | extern int lnet_cpt_of_nid_locked(lnet_nid_t nid); | |
646 | extern int lnet_cpt_of_nid(lnet_nid_t nid); | |
647 | extern lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt); | |
648 | extern lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt); | |
649 | extern lnet_ni_t *lnet_net2ni(__u32 net); | |
650 | ||
651 | int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when); | |
652 | void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when); | |
653 | int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid); | |
654 | int lnet_check_routes(void); | |
655 | int lnet_del_route(__u32 net, lnet_nid_t gw_nid); | |
656 | void lnet_destroy_routes(void); | |
657 | int lnet_get_route(int idx, __u32 *net, __u32 *hops, | |
658 | lnet_nid_t *gateway, __u32 *alive); | |
659 | void lnet_proc_init(void); | |
660 | void lnet_proc_fini(void); | |
661 | int lnet_rtrpools_alloc(int im_a_router); | |
662 | void lnet_rtrpools_free(void); | |
663 | lnet_remotenet_t *lnet_find_net_locked (__u32 net); | |
664 | ||
665 | int lnet_islocalnid(lnet_nid_t nid); | |
666 | int lnet_islocalnet(__u32 net); | |
667 | ||
668 | void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, | |
669 | unsigned int offset, unsigned int mlen); | |
670 | void lnet_msg_detach_md(lnet_msg_t *msg, int status); | |
671 | void lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev); | |
672 | void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type); | |
673 | void lnet_msg_commit(lnet_msg_t *msg, int cpt); | |
674 | void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status); | |
675 | ||
676 | void lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev); | |
677 | void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, | |
678 | unsigned int offset, unsigned int len); | |
679 | int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid); | |
680 | void lnet_return_tx_credits_locked(lnet_msg_t *msg); | |
681 | void lnet_return_rx_credits_locked(lnet_msg_t *msg); | |
682 | ||
683 | /* portals functions */ | |
684 | /* portals attributes */ | |
685 | static inline int | |
686 | lnet_ptl_is_lazy(lnet_portal_t *ptl) | |
687 | { | |
688 | return !!(ptl->ptl_options & LNET_PTL_LAZY); | |
689 | } | |
690 | ||
691 | static inline int | |
692 | lnet_ptl_is_unique(lnet_portal_t *ptl) | |
693 | { | |
694 | return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE); | |
695 | } | |
696 | ||
697 | static inline int | |
698 | lnet_ptl_is_wildcard(lnet_portal_t *ptl) | |
699 | { | |
700 | return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD); | |
701 | } | |
702 | ||
703 | static inline void | |
704 | lnet_ptl_setopt(lnet_portal_t *ptl, int opt) | |
705 | { | |
706 | ptl->ptl_options |= opt; | |
707 | } | |
708 | ||
709 | static inline void | |
710 | lnet_ptl_unsetopt(lnet_portal_t *ptl, int opt) | |
711 | { | |
712 | ptl->ptl_options &= ~opt; | |
713 | } | |
714 | ||
715 | /* match-table functions */ | |
716 | struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable, | |
717 | lnet_process_id_t id, __u64 mbits); | |
718 | struct lnet_match_table *lnet_mt_of_attach(unsigned int index, | |
719 | lnet_process_id_t id, __u64 mbits, | |
720 | __u64 ignore_bits, | |
721 | lnet_ins_pos_t pos); | |
722 | int lnet_mt_match_md(struct lnet_match_table *mtable, | |
723 | struct lnet_match_info *info, struct lnet_msg *msg); | |
724 | ||
725 | /* portals match/attach functions */ | |
726 | void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, | |
727 | struct list_head *matches, struct list_head *drops); | |
728 | void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md); | |
729 | int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg); | |
730 | ||
731 | /* initialized and finalize portals */ | |
732 | int lnet_portals_create(void); | |
733 | void lnet_portals_destroy(void); | |
734 | ||
735 | /* message functions */ | |
736 | int lnet_parse (lnet_ni_t *ni, lnet_hdr_t *hdr, | |
737 | lnet_nid_t fromnid, void *private, int rdma_req); | |
738 | void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, | |
739 | unsigned int offset, unsigned int mlen, unsigned int rlen); | |
740 | lnet_msg_t *lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *get_msg); | |
741 | void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len); | |
742 | void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc); | |
743 | void lnet_drop_delayed_msg_list(struct list_head *head, char *reason); | |
744 | void lnet_recv_delayed_msg_list(struct list_head *head); | |
745 | ||
746 | int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt); | |
747 | void lnet_msg_container_cleanup(struct lnet_msg_container *container); | |
748 | void lnet_msg_containers_destroy(void); | |
749 | int lnet_msg_containers_create(void); | |
750 | ||
751 | char *lnet_msgtyp2str (int type); | |
752 | void lnet_print_hdr (lnet_hdr_t * hdr); | |
753 | int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold); | |
754 | ||
755 | void lnet_counters_get(lnet_counters_t *counters); | |
756 | void lnet_counters_reset(void); | |
757 | ||
758 | unsigned int lnet_iov_nob (unsigned int niov, struct iovec *iov); | |
759 | int lnet_extract_iov (int dst_niov, struct iovec *dst, | |
760 | int src_niov, struct iovec *src, | |
761 | unsigned int offset, unsigned int len); | |
762 | ||
763 | unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov); | |
764 | int lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst, | |
765 | int src_niov, lnet_kiov_t *src, | |
766 | unsigned int offset, unsigned int len); | |
767 | ||
768 | void lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, | |
769 | unsigned int doffset, | |
770 | unsigned int nsiov, struct iovec *siov, | |
771 | unsigned int soffset, unsigned int nob); | |
772 | void lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, | |
773 | unsigned int iovoffset, | |
774 | unsigned int nkiov, lnet_kiov_t *kiov, | |
775 | unsigned int kiovoffset, unsigned int nob); | |
776 | void lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, | |
777 | unsigned int kiovoffset, | |
778 | unsigned int niov, struct iovec *iov, | |
779 | unsigned int iovoffset, unsigned int nob); | |
780 | void lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov, | |
781 | unsigned int doffset, | |
782 | unsigned int nskiov, lnet_kiov_t *skiov, | |
783 | unsigned int soffset, unsigned int nob); | |
784 | ||
785 | static inline void | |
786 | lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset, | |
787 | unsigned int nsiov, struct iovec *siov, unsigned int soffset, | |
788 | unsigned int nob) | |
789 | { | |
790 | struct iovec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen}; | |
791 | ||
792 | lnet_copy_iov2iov(1, &diov, doffset, | |
793 | nsiov, siov, soffset, nob); | |
794 | } | |
795 | ||
796 | static inline void | |
797 | lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset, | |
798 | unsigned int nsiov, lnet_kiov_t *skiov, unsigned int soffset, | |
799 | unsigned int nob) | |
800 | { | |
801 | struct iovec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen}; | |
802 | ||
803 | lnet_copy_kiov2iov(1, &diov, doffset, | |
804 | nsiov, skiov, soffset, nob); | |
805 | } | |
806 | ||
807 | static inline void | |
808 | lnet_copy_flat2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset, | |
809 | int slen, void *src, unsigned int soffset, unsigned int nob) | |
810 | { | |
811 | struct iovec siov = {/*.iov_base = */ src, /*.iov_len = */slen}; | |
812 | lnet_copy_iov2iov(ndiov, diov, doffset, | |
813 | 1, &siov, soffset, nob); | |
814 | } | |
815 | ||
816 | static inline void | |
817 | lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov, unsigned int doffset, | |
818 | int slen, void *src, unsigned int soffset, unsigned int nob) | |
819 | { | |
820 | struct iovec siov = {/* .iov_base = */ src, /* .iov_len = */ slen}; | |
821 | lnet_copy_iov2kiov(ndiov, dkiov, doffset, | |
822 | 1, &siov, soffset, nob); | |
823 | } | |
824 | ||
825 | void lnet_me_unlink(lnet_me_t *me); | |
826 | ||
827 | void lnet_md_unlink(lnet_libmd_t *md); | |
828 | void lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd); | |
829 | ||
830 | void lnet_register_lnd(lnd_t *lnd); | |
831 | void lnet_unregister_lnd(lnd_t *lnd); | |
832 | int lnet_set_ip_niaddr (lnet_ni_t *ni); | |
833 | ||
834 | int lnet_connect(socket_t **sockp, lnet_nid_t peer_nid, | |
835 | __u32 local_ip, __u32 peer_ip, int peer_port); | |
836 | void lnet_connect_console_error(int rc, lnet_nid_t peer_nid, | |
837 | __u32 peer_ip, int port); | |
/* acceptor configuration queries; lnet_count_acceptor_nis() and
 * lnet_acceptor_port() were previously declared twice — duplicates
 * removed. */
int lnet_count_acceptor_nis(void);
int lnet_acceptor_timeout(void);
int lnet_acceptor_port(void);
844 | ||
845 | int lnet_acceptor_start(void); | |
846 | void lnet_acceptor_stop(void); | |
847 | ||
848 | void lnet_get_tunables(void); | |
849 | int lnet_peers_start_down(void); | |
850 | int lnet_peer_buffer_credits(lnet_ni_t *ni); | |
851 | ||
852 | int lnet_router_checker_start(void); | |
853 | void lnet_router_checker_stop(void); | |
854 | void lnet_swap_pinginfo(lnet_ping_info_t *info); | |
855 | ||
856 | int lnet_ping_target_init(void); | |
857 | void lnet_ping_target_fini(void); | |
858 | int lnet_ping(lnet_process_id_t id, int timeout_ms, | |
859 | lnet_process_id_t *ids, int n_ids); | |
860 | ||
861 | int lnet_parse_ip2nets (char **networksp, char *ip2nets); | |
862 | int lnet_parse_routes (char *route_str, int *im_a_router); | |
863 | int lnet_parse_networks (struct list_head *nilist, char *networks); | |
864 | ||
865 | int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt); | |
866 | lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable, | |
867 | lnet_nid_t nid); | |
868 | void lnet_peer_tables_cleanup(void); | |
869 | void lnet_peer_tables_destroy(void); | |
870 | int lnet_peer_tables_create(void); | |
871 | void lnet_debug_peer(lnet_nid_t nid); | |
872 | ||
873 | ||
874 | #endif |