/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/lnet/lib-lnet.h"

/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK		0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT	1

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

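/*
 * Fault injection for testing: lnet_fail_nid() installs (or, with a zero
 * threshold, removes) a "test peer" entry on the_lnet.ln_test_peers.
 * While an entry is installed, fail_peer() below makes messages to or
 * from the matching NID fail.
 */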
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;

	LASSERT(the_lnet.ln_init);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	if (threshold) {
		/* Adding a new entry */
		LIBCFS_ALLOC(tp, sizeof(*tp));
		if (!tp)
			return -ENOMEM;

		tp->tp_nid = nid;
		tp->tp_threshold = threshold;

		lnet_net_lock(0);
		list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
		lnet_net_unlock(0);
		return 0;
	}

	/* removing entries */
	INIT_LIST_HEAD(&cull);

	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (!tp->tp_threshold ||	/* needs culling anyway */
		    nid == LNET_NID_ANY ||	/* removing all entries */
		    tp->tp_nid == nid) {	/* matched this one */
			list_del(&tp->tp_list);
			list_add(&tp->tp_list, &cull);
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

		list_del(&tp->tp_list);
		LIBCFS_FREE(tp, sizeof(*tp));
	}
	return 0;
}

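/*
 * Illustrative use of the interface above (not a call site in this file):
 *
 *	lnet_fail_nid(nid, 3);		  - fail the next 3 messages for nid
 *	lnet_fail_nid(LNET_NID_ANY, 0);	  - clear every test entry
 *
 * fail_peer() below consumes the installed thresholds on each send/receive.
 */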
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;
	int fail = 0;

	INIT_LIST_HEAD(&cull);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (!tp->tp_threshold) {
			/* zombie entry */
			if (outgoing) {
				/*
				 * only cull zombies on outgoing tests,
				 * since we may be at interrupt priority on
				 * incoming messages.
				 */
				list_del(&tp->tp_list);
				list_add(&tp->tp_list, &cull);
			}
			continue;
		}

		if (tp->tp_nid == LNET_NID_ANY ||	/* fail every peer */
		    nid == tp->tp_nid) {		/* fail this peer */
			fail = 1;

			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
				tp->tp_threshold--;
				if (outgoing &&
				    !tp->tp_threshold) {
					/* see above */
					list_del(&tp->tp_list);
					list_add(&tp->tp_list, &cull);
				}
			}
			break;
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
		list_del(&tp->tp_list);

		LIBCFS_FREE(tp, sizeof(*tp));
	}

	return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (iov++)->iov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

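/*
 * The lnet_copy_*2*() helpers below all follow the same pattern: first
 * skip whole fragments that lie before the requested offset, then copy
 * min(destination space, source space, bytes remaining) at a time,
 * advancing whichever side is exhausted.  The kiov variants kmap/kunmap
 * pages as they go.
 */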
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
		  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
		  unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;

	if (!nob)
		return;

	/* skip complete frags before 'doffset' */
	LASSERT(ndiov > 0);
	while (doffset >= diov->iov_len) {
		doffset -= diov->iov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	/* skip complete frags before 'soffset' */
	LASSERT(nsiov > 0);
	while (soffset >= siov->iov_len) {
		soffset -= siov->iov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->iov_len - doffset,
			       siov->iov_len - soffset);
		this_nob = min(this_nob, nob);

		memcpy((char *)diov->iov_base + doffset,
		       (char *)siov->iov_base + soffset, this_nob);
		nob -= this_nob;

		if (diov->iov_len > doffset + this_nob) {
			doffset += this_nob;
		} else {
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->iov_len > soffset + this_nob) {
			soffset += this_nob;
		} else {
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

int
lnet_extract_iov(int dst_niov, struct kvec *dst,
		 int src_niov, struct kvec *src,
		 unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (!len)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->iov_len) {	/* skip initial frags */
		offset -= src->iov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->iov_len - offset;
		dst->iov_base = ((char *)src->iov_base) + offset;

		if (len <= frag_len) {
			dst->iov_len = len;
			return niov;
		}

		dst->iov_len = frag_len;

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_iov);

unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (kiov++)->kiov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
		    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
		    unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;
	char *daddr = NULL;
	char *saddr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(ndiov > 0);
	while (doffset >= diov->kiov_len) {
		doffset -= diov->kiov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	LASSERT(nsiov > 0);
	while (soffset >= siov->kiov_len) {
		soffset -= siov->kiov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->kiov_len - doffset,
			       siov->kiov_len - soffset);
		this_nob = min(this_nob, nob);

		if (!daddr)
			daddr = ((char *)kmap(diov->kiov_page)) +
				diov->kiov_offset + doffset;
		if (!saddr)
			saddr = ((char *)kmap(siov->kiov_page)) +
				siov->kiov_offset + soffset;

		/*
		 * Vanishing risk of kmap deadlock when mapping 2 pages.
		 * However in practice at least one of the kiovs will be mapped
		 * kernel pages and the map/unmap will be NOOPs
		 */
		memcpy(daddr, saddr, this_nob);
		nob -= this_nob;

		if (diov->kiov_len > doffset + this_nob) {
			daddr += this_nob;
			doffset += this_nob;
		} else {
			kunmap(diov->kiov_page);
			daddr = NULL;
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->kiov_len > soffset + this_nob) {
			saddr += this_nob;
			soffset += this_nob;
		} else {
			kunmap(siov->kiov_page);
			saddr = NULL;
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);

	if (daddr)
		kunmap(diov->kiov_page);
	if (saddr)
		kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
		   unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int nob)
{
	/* NB iov, kiov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	do {
		LASSERT(niov > 0);
		LASSERT(nkiov > 0);
		this_nob = min(iov->iov_len - iovoffset,
			       (__kernel_size_t)kiov->kiov_len - kiovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
		nob -= this_nob;

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int niov,
		   struct kvec *iov, unsigned int iovoffset,
		   unsigned int nob)
{
	/* NB kiov, iov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	do {
		LASSERT(nkiov > 0);
		LASSERT(niov > 0);
		this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
			       iov->iov_len - iovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
		nob -= this_nob;

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}
	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
		  int src_niov, lnet_kiov_t *src,
		  unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (!len)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->kiov_len) {	/* skip initial frags */
		offset -= src->kiov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->kiov_len - offset;
		dst->kiov_page = src->kiov_page;
		dst->kiov_offset = src->kiov_offset + offset;

		if (len <= frag_len) {
			dst->kiov_len = len;
			LASSERT(dst->kiov_offset + dst->kiov_len
				<= PAGE_CACHE_SIZE);
			return niov;
		}

		dst->kiov_len = frag_len;
		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_kiov);

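/*
 * Hand a message to the LND for receive.  A NULL msg means the incoming
 * payload is simply consumed and discarded (the drop paths below rely on
 * this); on LND failure the message is finalized with the error.
 */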
static void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	     unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int niov = 0;
	struct kvec *iov = NULL;
	lnet_kiov_t *kiov = NULL;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(!mlen || msg);

	if (msg) {
		LASSERT(msg->msg_receiving);
		LASSERT(!msg->msg_sending);
		LASSERT(rlen == msg->msg_len);
		LASSERT(mlen <= msg->msg_len);
		LASSERT(msg->msg_offset == offset);
		LASSERT(msg->msg_wanted == mlen);

		msg->msg_receiving = 0;

		if (mlen) {
			niov = msg->msg_niov;
			iov = msg->msg_iov;
			kiov = msg->msg_kiov;

			LASSERT(niov > 0);
			LASSERT(!iov != !kiov);
		}
	}

	rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
				  niov, iov, kiov, offset, mlen, rlen);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
	lnet_libmd_t *md = msg->msg_md;

	LASSERT(msg->msg_len > 0);
	LASSERT(!msg->msg_routing);
	LASSERT(md);
	LASSERT(!msg->msg_niov);
	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);

	msg->msg_niov = md->md_niov;
	if (md->md_options & LNET_MD_KIOV)
		msg->msg_kiov = md->md_iov.kiov;
	else
		msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
	       unsigned int offset, unsigned int len)
{
	msg->msg_type = type;
	msg->msg_target = target;
	msg->msg_len = len;
	msg->msg_offset = offset;

	if (len)
		lnet_setpayloadbuffer(msg);

	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
	msg->msg_hdr.type = cpu_to_le32(type);
	msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
	msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
	/* src_nid will be set later */
	msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
	msg->msg_hdr.payload_length = cpu_to_le32(len);
}

static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *priv = msg->msg_private;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
		(msg->msg_txcredit && msg->msg_peertxcredit));

	rc = ni->ni_lnd->lnd_send(ni, priv, msg);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	LASSERT(!msg->msg_sending);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_rx_ready_delay);
	LASSERT(ni->ni_lnd->lnd_eager_recv);

	msg->msg_rx_ready_delay = 1;
	rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
					&msg->msg_private);
	if (rc) {
		CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
		       libcfs_id2str(msg->msg_target), rc);
		LASSERT(rc < 0); /* required by my callers */
	}

	return rc;
}

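/*
 * Peer health: lp_last_alive is refreshed either by lnet_notify() or by
 * asking the LND through lnd_query().  lnet_peer_alive_locked() below
 * rate-limits those queries to at most one per lnet_queryinterval second.
 */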
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	unsigned long last_alive = 0;

	LASSERT(lnet_peer_aliveness_enabled(lp));
	LASSERT(ni->ni_lnd->lnd_query);

	lnet_net_unlock(lp->lp_cpt);
	ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
	lnet_net_lock(lp->lp_cpt);

	lp->lp_last_query = cfs_time_current();

	if (last_alive) /* NI has updated timestamp */
		lp->lp_last_alive = last_alive;
}

/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
	int alive;
	unsigned long deadline;

	LASSERT(lnet_peer_aliveness_enabled(lp));

	/*
	 * Trust lnet_notify() if it has more recent aliveness news, but
	 * ignore the initial assumed death (see lnet_peers_start_down()).
	 */
	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
		return 0;

	deadline = cfs_time_add(lp->lp_last_alive,
				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
	alive = cfs_time_after(deadline, now);

	/*
	 * Update obsolete lp_alive except for routers assumed to be dead
	 * initially, because router checker would update aliveness in this
	 * case, and moreover lp_last_alive at peer creation is assumed.
	 */
	if (alive && !lp->lp_alive &&
	    !(lnet_isrouter(lp) && !lp->lp_alive_count))
		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

	return alive;
}

/*
 * NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock
 */
static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
	unsigned long now = cfs_time_current();

	if (!lnet_peer_aliveness_enabled(lp))
		return -ENODEV;

	if (lnet_peer_is_alive(lp, now))
		return 1;

	/*
	 * Peer appears dead, but we should avoid frequent NI queries (at
	 * most once per lnet_queryinterval seconds).
	 */
	if (lp->lp_last_query) {
		static const int lnet_queryinterval = 1;

		unsigned long next_query =
			cfs_time_add(lp->lp_last_query,
				     cfs_time_seconds(lnet_queryinterval));

		if (time_before(now, next_query)) {
			if (lp->lp_alive)
				CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
				      libcfs_nid2str(lp->lp_nid),
				      (int)now, (int)next_query,
				      lnet_queryinterval,
				      lp->lp_ni->ni_peertimeout);
			return 0;
		}
	}

	/* query NI for latest aliveness news */
	lnet_ni_query_locked(lp->lp_ni, lp);

	if (lnet_peer_is_alive(lp, now))
		return 1;

	lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
	return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *	  lnet_send() is going to lnet_net_unlock immediately after this, so
 *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
	lnet_peer_t *lp = msg->msg_txpeer;
	lnet_ni_t *ni = lp->lp_ni;
	int cpt = msg->msg_tx_cpt;
	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

	/* non-lnet_send() callers have checked before */
	LASSERT(!do_send || msg->msg_tx_delayed);
	LASSERT(!msg->msg_receiving);
	LASSERT(msg->msg_tx_committed);

	/* NB 'lp' is always the next hop */
	if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
	    !lnet_peer_alive_locked(lp)) {
		the_lnet.ln_counters[cpt]->drop_count++;
		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
		lnet_net_unlock(cpt);

		CNETERR("Dropping message for %s: peer not alive\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -EHOSTUNREACH);

		lnet_net_lock(cpt);
		return -EHOSTUNREACH;
	}

	if (msg->msg_md &&
	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
		lnet_net_unlock(cpt);

		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -ECANCELED);

		lnet_net_lock(cpt);
		return -ECANCELED;
	}

	if (!msg->msg_peertxcredit) {
		LASSERT((lp->lp_txcredits < 0) ==
			!list_empty(&lp->lp_txq));

		msg->msg_peertxcredit = 1;
		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
		lp->lp_txcredits--;

		if (lp->lp_txcredits < lp->lp_mintxcredits)
			lp->lp_mintxcredits = lp->lp_txcredits;

		if (lp->lp_txcredits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_txq);
			return LNET_CREDIT_WAIT;
		}
	}

	if (!msg->msg_txcredit) {
		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		msg->msg_txcredit = 1;
		tq->tq_credits--;

		if (tq->tq_credits < tq->tq_credits_min)
			tq->tq_credits_min = tq->tq_credits;

		if (tq->tq_credits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &tq->tq_delayed);
			return LNET_CREDIT_WAIT;
		}
	}

	if (do_send) {
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	}
	return LNET_CREDIT_OK;
}

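/*
 * Send-side credit accounting above takes two credits per message: one
 * from the peer (lp_txcredits) and one from the NI's per-CPT tx queue
 * (tq_credits).  When either count goes negative the message parks on the
 * corresponding queue and is replayed from lnet_return_tx_credits_locked()
 * below when a credit comes back.
 */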
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
	lnet_rtrbufpool_t *rbp;
	int cpt;

	LASSERT(msg->msg_rx_committed);

	cpt = msg->msg_rx_cpt;
	rbp = &the_lnet.ln_rtrpools[cpt][0];

	LASSERT(msg->msg_len <= LNET_MTU);
	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
		rbp++;
		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
	}

	return rbp;
}

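/*
 * Routed receives charge two credits as well: one from the source peer
 * (lp_rtrcredits) and one from the router buffer pool chosen by
 * lnet_msg2bufpool() above.  A shortfall of either parks the message on
 * lp_rtrq or rbp_msgs until lnet_return_rx_credits_locked() replays it.
 */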
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
	/*
	 * lnet_parse is going to lnet_net_unlock immediately after this, so it
	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.
	 * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
	 * received or OK to receive
	 */
	lnet_peer_t *lp = msg->msg_rxpeer;
	lnet_rtrbufpool_t *rbp;
	lnet_rtrbuf_t *rb;

	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);
	LASSERT(!msg->msg_niov);
	LASSERT(msg->msg_routing);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_sending);

	/* non-lnet_parse callers only receive delayed messages */
	LASSERT(!do_recv || msg->msg_rx_delayed);

	if (!msg->msg_peerrtrcredit) {
		LASSERT((lp->lp_rtrcredits < 0) ==
			!list_empty(&lp->lp_rtrq));

		msg->msg_peerrtrcredit = 1;
		lp->lp_rtrcredits--;
		if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
			lp->lp_minrtrcredits = lp->lp_rtrcredits;

		if (lp->lp_rtrcredits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
			return LNET_CREDIT_WAIT;
		}
	}

	rbp = lnet_msg2bufpool(msg);

	if (!msg->msg_rtrcredit) {
		msg->msg_rtrcredit = 1;
		rbp->rbp_credits--;
		if (rbp->rbp_credits < rbp->rbp_mincredits)
			rbp->rbp_mincredits = rbp->rbp_credits;

		if (rbp->rbp_credits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
			return LNET_CREDIT_WAIT;
		}
	}

	LASSERT(!list_empty(&rbp->rbp_bufs));
	rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
	list_del(&rb->rb_list);

	msg->msg_niov = rbp->rbp_npages;
	msg->msg_kiov = &rb->rb_kiov[0];

	if (do_recv) {
		int cpt = msg->msg_rx_cpt;

		lnet_net_unlock(cpt);
		lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
			     0, msg->msg_len, msg->msg_len);
		lnet_net_lock(cpt);
	}
	return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *txpeer = msg->msg_txpeer;
	lnet_msg_t *msg2;

	if (msg->msg_txcredit) {
		struct lnet_ni *ni = txpeer->lp_ni;
		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

		/* give back NI txcredits */
		msg->msg_txcredit = 0;

		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		tq->tq_credits++;
		if (tq->tq_credits <= 0) {
			msg2 = list_entry(tq->tq_delayed.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer->lp_ni == ni);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (msg->msg_peertxcredit) {
		/* give back peer txcredits */
		msg->msg_peertxcredit = 0;

		LASSERT((txpeer->lp_txcredits < 0) ==
			!list_empty(&txpeer->lp_txq));

		txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
		LASSERT(txpeer->lp_txqnob >= 0);

		txpeer->lp_txcredits++;
		if (txpeer->lp_txcredits <= 0) {
			msg2 = list_entry(txpeer->lp_txq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer == txpeer);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (txpeer) {
		msg->msg_txpeer = NULL;
		lnet_peer_decref_locked(txpeer);
	}
}

void
lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
{
	lnet_msg_t *msg;

	if (list_empty(&rbp->rbp_msgs))
		return;
	msg = list_entry(rbp->rbp_msgs.next,
			 lnet_msg_t, msg_list);
	list_del(&msg->msg_list);

	(void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
	struct list_head drop;
	lnet_msg_t *msg;
	lnet_msg_t *tmp;

	INIT_LIST_HEAD(&drop);

	list_splice_init(list, &drop);

	lnet_net_unlock(cpt);

	list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
		lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
			     0, 0, 0, msg->msg_hdr.payload_length);
		list_del_init(&msg->msg_list);
		lnet_finalize(NULL, msg, -ECANCELED);
	}

	lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *rxpeer = msg->msg_rxpeer;
	lnet_msg_t *msg2;

	if (msg->msg_rtrcredit) {
		/* give back global router credits */
		lnet_rtrbuf_t *rb;
		lnet_rtrbufpool_t *rbp;

		/*
		 * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
		 * there until it gets one allocated, or aborts the wait
		 * itself
		 */
		LASSERT(msg->msg_kiov);

		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
		rbp = rb->rb_pool;

		msg->msg_kiov = NULL;
		msg->msg_rtrcredit = 0;

		LASSERT(rbp == lnet_msg2bufpool(msg));

		LASSERT((rbp->rbp_credits > 0) ==
			!list_empty(&rbp->rbp_bufs));

		/*
		 * If routing is now turned off, we just drop this buffer and
		 * don't bother trying to return credits.
		 */
		if (!the_lnet.ln_routing) {
			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
			goto routing_off;
		}

		/*
		 * It is possible that a user has lowered the desired number of
		 * buffers in this pool.  Make sure we never put back
		 * more buffers than the stated number.
		 */
		if (rbp->rbp_credits >= rbp->rbp_nbuffers) {
			/* Discard this buffer so we don't have too many. */
			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
		} else {
			list_add(&rb->rb_list, &rbp->rbp_bufs);
			rbp->rbp_credits++;
			if (rbp->rbp_credits <= 0)
				lnet_schedule_blocked_locked(rbp);
		}
	}

routing_off:
	if (msg->msg_peerrtrcredit) {
		/* give back peer router credits */
		msg->msg_peerrtrcredit = 0;

		LASSERT((rxpeer->lp_rtrcredits < 0) ==
			!list_empty(&rxpeer->lp_rtrq));

		rxpeer->lp_rtrcredits++;
		/*
		 * drop all messages which are queued to be routed on that
		 * peer.
		 */
		if (!the_lnet.ln_routing) {
			lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
						     msg->msg_rx_cpt);
		} else if (rxpeer->lp_rtrcredits <= 0) {
			msg2 = list_entry(rxpeer->lp_rtrq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}
	if (rxpeer) {
		msg->msg_rxpeer = NULL;
		lnet_peer_decref_locked(rxpeer);
	}
}

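/*
 * Route ranking, best first: lower priority value, then fewer hops, then
 * the gateway with the smaller tx queue, then more tx credits, and
 * finally the least recently used route (lr_seq) for round-robin.
 */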
static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
	lnet_peer_t *p1 = r1->lr_gateway;
	lnet_peer_t *p2 = r2->lr_gateway;

	if (r1->lr_priority < r2->lr_priority)
		return 1;

	if (r1->lr_priority > r2->lr_priority)
		return -1;

	if (r1->lr_hops < r2->lr_hops)
		return 1;

	if (r1->lr_hops > r2->lr_hops)
		return -1;

	if (p1->lp_txqnob < p2->lp_txqnob)
		return 1;

	if (p1->lp_txqnob > p2->lp_txqnob)
		return -1;

	if (p1->lp_txcredits > p2->lp_txcredits)
		return 1;

	if (p1->lp_txcredits < p2->lp_txcredits)
		return -1;

	if (r1->lr_seq - r2->lr_seq <= 0)
		return 1;

	return -1;
}

static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *route;
	lnet_route_t *best_route;
	lnet_route_t *last_route;
	struct lnet_peer *lp_best;
	struct lnet_peer *lp;
	int rc;

	/*
	 * If @rtr_nid is not LNET_NID_ANY, return the gateway with
	 * rtr_nid nid, otherwise find the best gateway I can use
	 */
	rnet = lnet_find_net_locked(LNET_NIDNET(target));
	if (!rnet)
		return NULL;

	lp_best = NULL;
	best_route = NULL;
	last_route = NULL;
	list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
		lp = route->lr_gateway;

		if (!lnet_is_route_alive(route))
			continue;

		if (ni && lp->lp_ni != ni)
			continue;

		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
			return lp;

		if (!lp_best) {
			best_route = route;
			last_route = route;
			lp_best = lp;
			continue;
		}

		/* no protection on below fields, but it's harmless */
		if (last_route->lr_seq - route->lr_seq < 0)
			last_route = route;

		rc = lnet_compare_routes(route, best_route);
		if (rc < 0)
			continue;

		best_route = route;
		lp_best = lp;
	}

	/*
	 * set sequence number on the best router to the latest sequence + 1
	 * so we can round-robin all routers; it's racy and inaccurate but
	 * harmless and functional
	 */
	if (best_route)
		best_route->lr_seq = last_route->lr_seq + 1;
	return lp_best;
}

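/*
 * lnet_send() resolves the outgoing NI and next-hop peer for @msg: a
 * destination on a local network is sent directly (loopback via LOLND
 * bypasses credit accounting entirely), anything else goes through the
 * best gateway found by lnet_find_route_locked() above.
 */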
int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
	lnet_nid_t dst_nid = msg->msg_target.nid;
	struct lnet_ni *src_ni;
	struct lnet_ni *local_ni;
	struct lnet_peer *lp;
	int cpt;
	int cpt2;
	int rc;

	/*
	 * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
	 * but we might want to use pre-determined router for ACK/REPLY
	 * in the future
	 */
	/* NB: ni == interface pre-determined (ACK/REPLY) */
	LASSERT(!msg->msg_txpeer);
	LASSERT(!msg->msg_sending);
	LASSERT(!msg->msg_target_is_router);
	LASSERT(!msg->msg_receiving);

	msg->msg_sending = 1;

	LASSERT(!msg->msg_tx_committed);
	cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
 again:
	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	if (src_nid == LNET_NID_ANY) {
		src_ni = NULL;
	} else {
		src_ni = lnet_nid2ni_locked(src_nid, cpt);
		if (!src_ni) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}
		LASSERT(!msg->msg_routing);
	}

	/* Is this for someone on a local network? */
	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

	if (local_ni) {
		if (!src_ni) {
			src_ni = local_ni;
			src_nid = src_ni->ni_nid;
		} else if (src_ni == local_ni) {
			lnet_ni_decref_locked(local_ni, cpt);
		} else {
			lnet_ni_decref_locked(local_ni, cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("No route to %s from %s\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing)
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

		if (src_ni == the_lnet.ln_loni) {
			/* No send credit hassles with LOLND */
			lnet_net_unlock(cpt);
			lnet_ni_send(src_ni, msg);

			lnet_net_lock(cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			return 0;
		}

		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
		/* lp has ref on src_ni; lose mine */
		lnet_ni_decref_locked(src_ni, cpt);
		if (rc) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
				      libcfs_nid2str(dst_nid));
			/* ENOMEM or shutting down */
			return rc;
		}
		LASSERT(lp->lp_ni == src_ni);
	} else {
		/* sending to a remote network */
		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
		if (!lp) {
			if (src_ni)
				lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);

			LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
				      libcfs_id2str(msg->msg_target),
				      libcfs_nid2str(src_nid));
			return -EHOSTUNREACH;
		}

		/*
		 * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
		 * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
		 * pre-determined router, this can happen if router table
		 * was changed when we release the lock
		 */
		if (rtr_nid != lp->lp_nid) {
			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
			if (cpt2 != cpt) {
				if (src_ni)
					lnet_ni_decref_locked(src_ni, cpt);
				lnet_net_unlock(cpt);

				rtr_nid = lp->lp_nid;
				cpt = cpt2;
				goto again;
			}
		}

		CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

		if (!src_ni) {
			src_ni = lp->lp_ni;
			src_nid = src_ni->ni_nid;
		} else {
			LASSERT(src_ni == lp->lp_ni);
			lnet_ni_decref_locked(src_ni, cpt);
		}

		lnet_peer_addref_locked(lp);

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing) {
			/* I'm the source and now I know which NI to send on */
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
		}

		msg->msg_target_is_router = 1;
		msg->msg_target.nid = lp->lp_nid;
		msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
	}

	/* 'lp' is our best choice of peer */

	LASSERT(!msg->msg_peertxcredit);
	LASSERT(!msg->msg_txcredit);
	LASSERT(!msg->msg_txpeer);

	msg->msg_txpeer = lp; /* msg takes my ref on lp */

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc < 0)
		return rc;

	if (rc == LNET_CREDIT_OK)
		lnet_ni_send(src_ni, msg);

	return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
}

static void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += nob;
	lnet_net_unlock(cpt);

	lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;

	if (msg->msg_wanted)
		lnet_setpayloadbuffer(msg);

	lnet_build_msg_event(msg, LNET_EVENT_PUT);

	/*
	 * Must I ACK? If so I'll grab the ack_wmd out of the header and put
	 * it back into the ACK during lnet_finalize()
	 */
	msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
		       !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);

	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	struct lnet_match_info info;
	int rc;

	/* Convert put fields to host byte order */
	hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
	hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
	hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_PUT;
	info.mi_portal = hdr->msg.put.ptl_index;
	info.mi_rlength = hdr->payload_length;
	info.mi_roffset = hdr->msg.put.offset;
	info.mi_mbits = hdr->msg.put.match_bits;

	msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;

 again:
	rc = lnet_ptl_match_md(&info, msg);
	switch (rc) {
	default:
		LBUG();

	case LNET_MATCHMD_OK:
		lnet_recv_put(ni, msg);
		return 0;

	case LNET_MATCHMD_NONE:
		if (msg->msg_rx_delayed) /* attached on delayed list */
			return 0;

		rc = lnet_ni_eager_recv(ni, msg);
		if (!rc)
			goto again;
		/* fall through */

	case LNET_MATCHMD_DROP:
		CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

		return ENOENT; /* +ve: OK but no match */
	}
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
	struct lnet_match_info info;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_handle_wire_t reply_wmd;
	int rc;

	/* Convert get fields to host byte order */
	hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
	hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
	hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_GET;
	info.mi_portal = hdr->msg.get.ptl_index;
	info.mi_rlength = hdr->msg.get.sink_length;
	info.mi_roffset = hdr->msg.get.src_offset;
	info.mi_mbits = hdr->msg.get.match_bits;

	rc = lnet_ptl_match_md(&info, msg);
	if (rc == LNET_MATCHMD_DROP) {
		CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(rc == LNET_MATCHMD_OK);

	lnet_build_msg_event(msg, LNET_EVENT_GET);

	reply_wmd = hdr->msg.get.return_wmd;

	lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
		       msg->msg_offset, msg->msg_wanted);

	msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

	if (rdma_get) {
		/* The LND completes the REPLY from her recv procedure */
		lnet_ni_recv(ni, msg->msg_private, msg, 0,
			     msg->msg_offset, msg->msg_len, msg->msg_len);
		return 0;
	}

	lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
	msg->msg_receiving = 0;

	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* didn't get as far as lnet_ni_send() */
		CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
		       libcfs_nid2str(ni->ni_nid),
		       libcfs_id2str(info.mi_id), rc);

		lnet_finalize(ni, msg, rc);
	}

	return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *private = msg->msg_private;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int rlength;
	int mlength;
	int cpt;

	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
	if (!md || !md->md_threshold || md->md_me) {
		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			!md ? "invalid" : "inactive",
			hdr->msg.reply.dst_wmd.wh_interface_cookie,
			hdr->msg.reply.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(!md->md_offset);

	rlength = hdr->payload_length;
	mlength = min_t(uint, rlength, md->md_length);

	if (mlength < rlength &&
	    !(md->md_options & LNET_MD_TRUNCATE)) {
		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
			mlength);
		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, mlength);

	if (mlength)
		lnet_setpayloadbuffer(msg);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
	return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int cpt;

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* Convert ack fields to host byte order */
	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
	if (!md || !md->md_threshold || md->md_me) {
		/* Don't moan; this is expected */
		CDEBUG(D_NET,
		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
		       !md ? "invalid" : "inactive",
		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
		       hdr->msg.ack.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve! */
	}

	CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       hdr->msg.ack.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_ACK);

	lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
	return 0;
}

1693
ec5fb5be
LZ
1694/**
1695 * \retval LNET_CREDIT_OK If \a msg is forwarded
1696 * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer
1697 * \retval -ve error code
1698 */
d7e09d03
PT
1699static int
1700lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
1701{
7e7ab095 1702 int rc = 0;
d7e09d03 1703
86ef6250
AS
1704 if (!the_lnet.ln_routing)
1705 return -ECANCELED;
1706
d7e09d03
PT
1707 if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
1708 lnet_msg2bufpool(msg)->rbp_credits <= 0) {
06ace26e 1709 if (!ni->ni_lnd->lnd_eager_recv) {
d7e09d03
PT
1710 msg->msg_rx_ready_delay = 1;
1711 } else {
1712 lnet_net_unlock(msg->msg_rx_cpt);
1713 rc = lnet_ni_eager_recv(ni, msg);
1714 lnet_net_lock(msg->msg_rx_cpt);
1715 }
1716 }
1717
5fd88337 1718 if (!rc)
d7e09d03
PT
1719 rc = lnet_post_routed_recv_locked(msg, 0);
1720 return rc;
1721}
1722
char *
lnet_msgtyp2str(int type)
{
	switch (type) {
	case LNET_MSG_ACK:
		return "ACK";
	case LNET_MSG_PUT:
		return "PUT";
	case LNET_MSG_GET:
		return "GET";
	case LNET_MSG_REPLY:
		return "REPLY";
	case LNET_MSG_HELLO:
		return "HELLO";
	default:
		return "<UNKNOWN>";
	}
}

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
	lnet_process_id_t src = {0};
	lnet_process_id_t dst = {0};
	char *type_str = lnet_msgtyp2str(hdr->type);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	dst.nid = hdr->dest_nid;
	dst.pid = hdr->dest_pid;

	CWARN("P3 Header at %p of type %s\n", hdr, type_str);
	CWARN("    From %s\n", libcfs_id2str(src));
	CWARN("    To   %s\n", libcfs_id2str(dst));

	switch (hdr->type) {
	default:
		break;

	case LNET_MSG_PUT:
		CWARN("    Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.put.ptl_index,
		      hdr->msg.put.ack_wmd.wh_interface_cookie,
		      hdr->msg.put.ack_wmd.wh_object_cookie,
		      hdr->msg.put.match_bits);
		CWARN("    Length %d, offset %d, hdr data %#llx\n",
		      hdr->payload_length, hdr->msg.put.offset,
		      hdr->msg.put.hdr_data);
		break;

	case LNET_MSG_GET:
		CWARN("    Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.get.ptl_index,
		      hdr->msg.get.return_wmd.wh_interface_cookie,
		      hdr->msg.get.return_wmd.wh_object_cookie,
		      hdr->msg.get.match_bits);
		CWARN("    Length %d, src offset %d\n",
		      hdr->msg.get.sink_length,
		      hdr->msg.get.src_offset);
		break;

	case LNET_MSG_ACK:
		CWARN("    dst md %#llx.%#llx, manipulated length %d\n",
		      hdr->msg.ack.dst_wmd.wh_interface_cookie,
		      hdr->msg.ack.dst_wmd.wh_object_cookie,
		      hdr->msg.ack.mlength);
		break;

	case LNET_MSG_REPLY:
		CWARN("    dst md %#llx.%#llx, length %d\n",
		      hdr->msg.reply.dst_wmd.wh_interface_cookie,
		      hdr->msg.reply.dst_wmd.wh_object_cookie,
		      hdr->payload_length);
	}
}

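/*
 * lnet_parse() is the entry point for every message an LND receives: it
 * validates the wire header, forwards messages addressed to other NIDs
 * when routing is enabled, and dispatches messages for this node to the
 * type-specific parsers above.
 */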
1800int
1801lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
1802 void *private, int rdma_req)
1803{
7e7ab095
MS
1804 int rc = 0;
1805 int cpt;
1806 int for_me;
1807 struct lnet_msg *msg;
1808 lnet_pid_t dest_pid;
1809 lnet_nid_t dest_nid;
1810 lnet_nid_t src_nid;
1811 __u32 payload_length;
1812 __u32 type;
d7e09d03 1813
af66a6e2 1814 LASSERT(!in_interrupt());
d7e09d03
PT
1815
1816 type = le32_to_cpu(hdr->type);
1817 src_nid = le64_to_cpu(hdr->src_nid);
1818 dest_nid = le64_to_cpu(hdr->dest_nid);
1819 dest_pid = le32_to_cpu(hdr->dest_pid);
1820 payload_length = le32_to_cpu(hdr->payload_length);
1821
1822 for_me = (ni->ni_nid == dest_nid);
1823 cpt = lnet_cpt_of_nid(from_nid);
1824
1825 switch (type) {
1826 case LNET_MSG_ACK:
1827 case LNET_MSG_GET:
1828 if (payload_length > 0) {
1829 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
1830 libcfs_nid2str(from_nid),
1831 libcfs_nid2str(src_nid),
1832 lnet_msgtyp2str(type), payload_length);
1833 return -EPROTO;
1834 }
1835 break;
1836
1837 case LNET_MSG_PUT:
1838 case LNET_MSG_REPLY:
ae4003f0
LN
1839 if (payload_length >
1840 (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2d00bd17 1841 CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
d7e09d03
PT
1842 libcfs_nid2str(from_nid),
1843 libcfs_nid2str(src_nid),
1844 lnet_msgtyp2str(type),
1845 payload_length,
1846 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
1847 return -EPROTO;
1848 }
1849 break;
1850
1851 default:
1852 CERROR("%s, src %s: Bad message type 0x%x\n",
1853 libcfs_nid2str(from_nid),
1854 libcfs_nid2str(src_nid), type);
1855 return -EPROTO;
1856 }
1857
1858 if (the_lnet.ln_routing &&
ec0067d1 1859 ni->ni_last_alive != ktime_get_real_seconds()) {
d7e09d03 1860 /* NB: so far here is the only place to set NI status to "up */
86ef6250 1861 lnet_ni_lock(ni);
ec0067d1 1862 ni->ni_last_alive = ktime_get_real_seconds();
06ace26e 1863 if (ni->ni_status &&
d7e09d03
PT
1864 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
1865 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
1866 lnet_ni_unlock(ni);
1867 }
1868
4420cfd3
JS
1869 /*
1870 * Regard a bad destination NID as a protocol error. Senders should
d7e09d03 1871 * know what they're doing; if they don't they're misconfigured, buggy
4420cfd3
JS
1872 * or malicious so we chop them off at the knees :)
1873 */
d7e09d03
PT
1874 if (!for_me) {
1875 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
1876 /* should have gone direct */
2d00bd17
JP
1877 CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
1878 libcfs_nid2str(from_nid),
1879 libcfs_nid2str(src_nid),
1880 libcfs_nid2str(dest_nid));
d7e09d03
PT
1881 return -EPROTO;
1882 }
1883
1884 if (lnet_islocalnid(dest_nid)) {
4420cfd3
JS
1885 /*
1886 * dest is another local NI; sender should have used
1887 * this node's NID on its own network
1888 */
2d00bd17
JP
1889 CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
1890 libcfs_nid2str(from_nid),
1891 libcfs_nid2str(src_nid),
1892 libcfs_nid2str(dest_nid));
d7e09d03
PT
1893 return -EPROTO;
1894 }
1895
1896 if (rdma_req && type == LNET_MSG_GET) {
2d00bd17
JP
1897 CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
1898 libcfs_nid2str(from_nid),
1899 libcfs_nid2str(src_nid),
1900 libcfs_nid2str(dest_nid));
d7e09d03
PT
1901 return -EPROTO;
1902 }
1903
1904 if (!the_lnet.ln_routing) {
2d00bd17
JP
1905 CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
1906 libcfs_nid2str(from_nid),
1907 libcfs_nid2str(src_nid),
1908 libcfs_nid2str(dest_nid));
d7e09d03
PT
1909 goto drop;
1910 }
1911 }
1912
4420cfd3
JS
1913 /*
1914 * Message looks OK; we're not going to return an error, so we MUST
1915 * call back lnd_recv() come what may...
1916 */
af66a6e2 1917 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
9b79ca85 1918 fail_peer(src_nid, 0)) { /* shall we now? */
d7e09d03
PT
1919 CERROR("%s, src %s: Dropping %s to simulate failure\n",
1920 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1921 lnet_msgtyp2str(type));
1922 goto drop;
1923 }
1924
1925 msg = lnet_msg_alloc();
06ace26e 1926 if (!msg) {
d7e09d03
PT
1927 CERROR("%s, src %s: Dropping %s (out of memory)\n",
1928 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1929 lnet_msgtyp2str(type));
1930 goto drop;
1931 }
1932
ae4003f0
LN
1933 /* msg zeroed in lnet_msg_alloc;
1934 * i.e. flags all clear, pointers NULL etc
1935 */
d7e09d03
PT
1936 msg->msg_type = type;
1937 msg->msg_private = private;
1938 msg->msg_receiving = 1;
d3d3d37a
JS
1939 msg->msg_wanted = payload_length;
1940 msg->msg_len = payload_length;
d7e09d03
PT
1941 msg->msg_offset = 0;
1942 msg->msg_hdr = *hdr;
1943 /* for building message event */
1944 msg->msg_from = from_nid;
1945 if (!for_me) {
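		/*
		 * NB: for a routed message the header is left in wire byte
		 * order, since it is relayed unchanged; only the for_me
		 * branch below converts msg_hdr fields to host byte order.
		 */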
1946 msg->msg_target.pid = dest_pid;
1947 msg->msg_target.nid = dest_nid;
1948 msg->msg_routing = 1;
1949
1950 } else {
1951 /* convert common msg->hdr fields to host byteorder */
1952 msg->msg_hdr.type = type;
1953 msg->msg_hdr.src_nid = src_nid;
1954 msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
1955 msg->msg_hdr.dest_nid = dest_nid;
1956 msg->msg_hdr.dest_pid = dest_pid;
1957 msg->msg_hdr.payload_length = payload_length;
1958 }
1959
1960 lnet_net_lock(cpt);
1961 rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
5fd88337 1962 if (rc) {
d7e09d03 1963 lnet_net_unlock(cpt);
2d00bd17 1964 CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
d7e09d03
PT
1965 libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
1966 lnet_msgtyp2str(type), rc);
1967 lnet_msg_free(msg);
1968 goto drop;
1969 }
1970
af3fa7c7
LZ
1971 if (lnet_isrouter(msg->msg_rxpeer)) {
1972 lnet_peer_set_alive(msg->msg_rxpeer);
1973 if (avoid_asym_router_failure &&
1974 LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
 1975 /* received a remote message from a router; update the
 1976 * remote NI status on this router.
 1977 * NB: multi-hop routed messages are ignored.
 1978 */
1979 lnet_router_ni_update_locked(msg->msg_rxpeer,
1980 LNET_NIDNET(src_nid));
1981 }
1982 }
1983
d7e09d03
PT
1984 lnet_msg_commit(msg, cpt);
1985
1986 if (!for_me) {
1987 rc = lnet_parse_forward_locked(ni, msg);
1988 lnet_net_unlock(cpt);
1989
1990 if (rc < 0)
1991 goto free_drop;
ec5fb5be
LZ
1992
1993 if (rc == LNET_CREDIT_OK) {
d7e09d03
PT
1994 lnet_ni_recv(ni, msg->msg_private, msg, 0,
1995 0, payload_length, payload_length);
1996 }
1997 return 0;
1998 }
1999
2000 lnet_net_unlock(cpt);
2001
2002 switch (type) {
2003 case LNET_MSG_ACK:
2004 rc = lnet_parse_ack(ni, msg);
2005 break;
2006 case LNET_MSG_PUT:
2007 rc = lnet_parse_put(ni, msg);
2008 break;
2009 case LNET_MSG_GET:
2010 rc = lnet_parse_get(ni, msg, rdma_req);
2011 break;
2012 case LNET_MSG_REPLY:
2013 rc = lnet_parse_reply(ni, msg);
2014 break;
2015 default:
2016 LASSERT(0);
2017 rc = -EPROTO;
2018 goto free_drop; /* prevent an unused label if !kernel */
2019 }
2020
5fd88337 2021 if (!rc)
d7e09d03
PT
2022 return 0;
2023
af66a6e2 2024 LASSERT(rc == ENOENT);
d7e09d03
PT
2025
2026 free_drop:
06ace26e 2027 LASSERT(!msg->msg_md);
d7e09d03
PT
2028 lnet_finalize(ni, msg, rc);
2029
2030 drop:
2031 lnet_drop_message(ni, cpt, private, payload_length);
2032 return 0;
2033}
2034EXPORT_SYMBOL(lnet_parse);
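/*
 * Usage sketch (illustrative only, not part of the original file): how
 * an LND's receive path might hand an incoming header to lnet_parse().
 * The lnd_recv_hdr() wrapper and its arguments are hypothetical; only
 * the lnet_parse() call itself is real.
 */
#if 0
static int lnd_recv_hdr(lnet_ni_t *ni, lnet_hdr_t *hdr,
			lnet_nid_t from_nid, void *rx_private)
{
	/* rdma_req == 0: this hypothetical LND never pre-arranges RDMA,
	 * so optimized GETs are not expected here
	 */
	return lnet_parse(ni, hdr, from_nid, rx_private, 0);
}
#endif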
2035
2036void
2037lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
2038{
2039 while (!list_empty(head)) {
7e7ab095
MS
2040 lnet_process_id_t id = {0};
2041 lnet_msg_t *msg;
d7e09d03
PT
2042
2043 msg = list_entry(head->next, lnet_msg_t, msg_list);
2044 list_del(&msg->msg_list);
2045
2046 id.nid = msg->msg_hdr.src_nid;
2047 id.pid = msg->msg_hdr.src_pid;
2048
06ace26e 2049 LASSERT(!msg->msg_md);
d7e09d03 2050 LASSERT(msg->msg_rx_delayed);
06ace26e 2051 LASSERT(msg->msg_rxpeer);
d7e09d03
PT
2052 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
2053
b0f5aad5 2054 CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
d7e09d03
PT
2055 libcfs_id2str(id),
2056 msg->msg_hdr.msg.put.ptl_index,
2057 msg->msg_hdr.msg.put.match_bits,
2058 msg->msg_hdr.msg.put.offset,
2059 msg->msg_hdr.payload_length, reason);
2060
4420cfd3
JS
2061 /*
2062 * NB I can't drop msg's ref on msg_rxpeer until after I've
d7e09d03 2063 * called lnet_drop_message(), so I just hang onto msg as well
4420cfd3
JS
2064 * until that's done
2065 */
d7e09d03
PT
2066 lnet_drop_message(msg->msg_rxpeer->lp_ni,
2067 msg->msg_rxpeer->lp_cpt,
2068 msg->msg_private, msg->msg_len);
 2069 /*
 2070 * NB: the message will not generate an event without an attached
 2071 * MD, but we should still pass an error code so lnet_msg_decommit()
 2072 * can skip counter operations and other checks.
 2073 */
2074 lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
2075 }
2076}
2077
2078void
2079lnet_recv_delayed_msg_list(struct list_head *head)
2080{
2081 while (!list_empty(head)) {
7e7ab095
MS
2082 lnet_msg_t *msg;
2083 lnet_process_id_t id;
d7e09d03
PT
2084
2085 msg = list_entry(head->next, lnet_msg_t, msg_list);
2086 list_del(&msg->msg_list);
2087
4420cfd3
JS
2088 /*
2089 * md won't disappear under me, since each msg
2090 * holds a ref on it
2091 */
d7e09d03
PT
2092 id.nid = msg->msg_hdr.src_nid;
2093 id.pid = msg->msg_hdr.src_pid;
2094
2095 LASSERT(msg->msg_rx_delayed);
06ace26e
JS
2096 LASSERT(msg->msg_md);
2097 LASSERT(msg->msg_rxpeer);
d7e09d03
PT
2098 LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
2099
2d00bd17
JP
2100 CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
2101 libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
2102 msg->msg_hdr.msg.put.match_bits,
2103 msg->msg_hdr.msg.put.offset,
2104 msg->msg_hdr.payload_length);
d7e09d03
PT
2105
2106 lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
2107 }
2108}
2109
2110/**
2111 * Initiate an asynchronous PUT operation.
2112 *
2113 * There are several events associated with a PUT: completion of the send on
2114 * the initiator node (LNET_EVENT_SEND), and when the send completes
2115 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
2116 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
2117 * used at the target node to indicate the completion of incoming data
2118 * delivery.
2119 *
2120 * The local events will be logged in the EQ associated with the MD pointed to
 2121 * by the \a mdh handle. Using an MD without an associated EQ results in these
2122 * events being discarded. In this case, the caller must have another
2123 * mechanism (e.g., a higher level protocol) for determining when it is safe
2124 * to modify the memory region associated with the MD.
2125 *
2126 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
2127 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
2128 *
2129 * \param self Indicates the NID of a local interface through which to send
2130 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
2131 * \param mdh A handle for the MD that describes the memory to be sent. The MD
2132 * must be "free floating" (See LNetMDBind()).
2133 * \param ack Controls whether an acknowledgment is requested.
2134 * Acknowledgments are only sent when they are requested by the initiating
2135 * process and the target MD enables them.
2136 * \param target A process identifier for the target process.
2137 * \param portal The index in the \a target's portal table.
2138 * \param match_bits The match bits to use for MD selection at the target
2139 * process.
2140 * \param offset The offset into the target MD (only used when the target
2141 * MD has the LNET_MD_MANAGE_REMOTE option set).
2142 * \param hdr_data 64 bits of user data that can be included in the message
2143 * header. This data is written to an event queue entry at the target if an
2144 * EQ is present on the matching MD.
2145 *
2146 * \retval 0 Success, and only in this case events will be generated
2147 * and logged to EQ (if it exists).
2148 * \retval -EIO Simulated failure.
2149 * \retval -ENOMEM Memory allocation failure.
2150 * \retval -ENOENT Invalid MD object.
2151 *
2152 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
2153 */
2154int
2155LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
2156 lnet_process_id_t target, unsigned int portal,
2157 __u64 match_bits, unsigned int offset,
2158 __u64 hdr_data)
2159{
7e7ab095
MS
2160 struct lnet_msg *msg;
2161 struct lnet_libmd *md;
2162 int cpt;
2163 int rc;
d7e09d03 2164
af66a6e2
LN
2165 LASSERT(the_lnet.ln_init);
2166 LASSERT(the_lnet.ln_refcount > 0);
d7e09d03 2167
af66a6e2 2168 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
9b79ca85 2169 fail_peer(target.nid, 1)) { /* shall we now? */
d7e09d03
PT
2170 CERROR("Dropping PUT to %s: simulated failure\n",
2171 libcfs_id2str(target));
2172 return -EIO;
2173 }
2174
2175 msg = lnet_msg_alloc();
06ace26e 2176 if (!msg) {
d7e09d03
PT
2177 CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
2178 libcfs_id2str(target));
2179 return -ENOMEM;
2180 }
2181 msg->msg_vmflush = !!memory_pressure_get();
2182
2183 cpt = lnet_cpt_of_cookie(mdh.cookie);
2184 lnet_res_lock(cpt);
2185
2186 md = lnet_handle2md(&mdh);
5fd88337 2187 if (!md || !md->md_threshold || md->md_me) {
b0f5aad5 2188 CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
d7e09d03 2189 match_bits, portal, libcfs_id2str(target),
06ace26e
JS
2190 !md ? -1 : md->md_threshold);
2191 if (md && md->md_me)
d7e09d03
PT
2192 CERROR("Source MD also attached to portal %d\n",
2193 md->md_me->me_portal);
2194 lnet_res_unlock(cpt);
2195
2196 lnet_msg_free(msg);
2197 return -ENOENT;
2198 }
2199
2200 CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
2201
2202 lnet_msg_attach_md(msg, md, 0, 0);
2203
2204 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
2205
2206 msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
2207 msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
2208 msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
2209 msg->msg_hdr.msg.put.hdr_data = hdr_data;
2210
2211 /* NB handles only looked up by creator (no flips) */
2212 if (ack == LNET_ACK_REQ) {
2213 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2214 the_lnet.ln_interface_cookie;
2215 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2216 md->md_lh.lh_cookie;
2217 } else {
2218 msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
2219 LNET_WIRE_HANDLE_COOKIE_NONE;
2220 msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
2221 LNET_WIRE_HANDLE_COOKIE_NONE;
2222 }
2223
2224 lnet_res_unlock(cpt);
2225
2226 lnet_build_msg_event(msg, LNET_EVENT_SEND);
2227
2228 rc = lnet_send(self, msg, LNET_NID_ANY);
5fd88337 2229 if (rc) {
af66a6e2 2230 CNETERR("Error sending PUT to %s: %d\n",
c314c319 2231 libcfs_id2str(target), rc);
af66a6e2 2232 lnet_finalize(NULL, msg, rc);
d7e09d03
PT
2233 }
2234
2235 /* completion will be signalled by an event */
2236 return 0;
2237}
2238EXPORT_SYMBOL(LNetPut);
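/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal PUT against a free-floating MD, as described above. The
 * example_put() wrapper, buffer, portal index and match bits are all
 * hypothetical; error handling is abbreviated.
 */
#if 0
static char example_buf[256];

static int example_put(lnet_process_id_t target, lnet_handle_eq_t eq)
{
	lnet_md_t md = { 0 };
	lnet_handle_md_t mdh;
	int rc;

	md.start = example_buf;
	md.length = sizeof(example_buf);
	md.threshold = 2;		/* one SEND + one ACK event */
	md.eq_handle = eq;		/* events logged to caller's EQ */

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);	/* "free floating" MD */
	if (rc)
		return rc;

	/* portal 4 and match bits 0x1ULL are arbitrary example values */
	return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
		       4, 0x1ULL, 0, 0);
}
#endif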
2239
2240lnet_msg_t *
af66a6e2 2241lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
d7e09d03 2242{
4420cfd3
JS
2243 /*
 2244 * The LND can DMA directly to the GET MD (i.e. no REPLY msg). This
d7e09d03
PT
2245 * returns a msg for the LND to pass to lnet_finalize() when the sink
2246 * data has been received.
2247 *
2248 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
4420cfd3
JS
2249 * lnet_finalize() is called on it, so the LND must call this first
2250 */
7e7ab095
MS
2251 struct lnet_msg *msg = lnet_msg_alloc();
2252 struct lnet_libmd *getmd = getmsg->msg_md;
2253 lnet_process_id_t peer_id = getmsg->msg_target;
2254 int cpt;
d7e09d03
PT
2255
2256 LASSERT(!getmsg->msg_target_is_router);
2257 LASSERT(!getmsg->msg_routing);
2258
06ace26e 2259 if (!msg) {
af66a6e2 2260 CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
c314c319 2261 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
d7e09d03
PT
2262 goto drop;
2263 }
2264
600e9b49
LZ
2265 cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
2266 lnet_res_lock(cpt);
2267
2268 LASSERT(getmd->md_refcount > 0);
2269
5fd88337 2270 if (!getmd->md_threshold) {
af66a6e2 2271 CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
c314c319
JS
2272 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
2273 getmd);
d7e09d03
PT
2274 lnet_res_unlock(cpt);
2275 goto drop;
2276 }
2277
5fd88337 2278 LASSERT(!getmd->md_offset);
d7e09d03
PT
2279
2280 CDEBUG(D_NET, "%s: Reply from %s md %p\n",
2281 libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
2282
2283 /* setup information for lnet_build_msg_event */
2284 msg->msg_from = peer_id.nid;
2285 msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
2286 msg->msg_hdr.src_nid = peer_id.nid;
2287 msg->msg_hdr.payload_length = getmd->md_length;
2288 msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
2289
2290 lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
2291 lnet_res_unlock(cpt);
2292
2293 cpt = lnet_cpt_of_nid(peer_id.nid);
2294
2295 lnet_net_lock(cpt);
2296 lnet_msg_commit(msg, cpt);
2297 lnet_net_unlock(cpt);
2298
2299 lnet_build_msg_event(msg, LNET_EVENT_REPLY);
2300
2301 return msg;
2302
2303 drop:
2304 cpt = lnet_cpt_of_nid(peer_id.nid);
2305
2306 lnet_net_lock(cpt);
2307 the_lnet.ln_counters[cpt]->drop_count++;
2308 the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
2309 lnet_net_unlock(cpt);
2310
06ace26e 2311 if (msg)
d7e09d03
PT
2312 lnet_msg_free(msg);
2313
2314 return NULL;
2315}
2316EXPORT_SYMBOL(lnet_create_reply_msg);
2317
2318void
2319lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
2320{
4420cfd3
JS
2321 /*
2322 * Set the REPLY length, now the RDMA that elides the REPLY message has
2323 * completed and I know it.
2324 */
06ace26e 2325 LASSERT(reply);
af66a6e2
LN
2326 LASSERT(reply->msg_type == LNET_MSG_GET);
2327 LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
d7e09d03 2328
4420cfd3
JS
2329 /*
2330 * NB I trusted my peer to RDMA. If she tells me she's written beyond
2331 * the end of my buffer, I might as well be dead.
2332 */
af66a6e2 2333 LASSERT(len <= reply->msg_ev.mlength);
d7e09d03
PT
2334
2335 reply->msg_ev.mlength = len;
2336}
2337EXPORT_SYMBOL(lnet_set_reply_msg_len);
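/*
 * LND-side sketch (illustrative only, not part of the original file):
 * the expected call sequence when an LND elides the REPLY by RDMAing
 * GET data straight into the sink MD. lnd_getmsg_done() and its
 * arguments are hypothetical.
 */
#if 0
static void lnd_getmsg_done(lnet_ni_t *ni, lnet_msg_t *getmsg,
			    unsigned int bytes_rdmaed, int status)
{
	/* must be created before lnet_finalize() frees getmsg */
	lnet_msg_t *reply = lnet_create_reply_msg(ni, getmsg);

	lnet_finalize(ni, getmsg, status);
	if (!reply)
		return;

	/* record how much the peer actually wrote, then complete */
	lnet_set_reply_msg_len(ni, reply, bytes_rdmaed);
	lnet_finalize(ni, reply, status);
}
#endif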
2338
2339/**
2340 * Initiate an asynchronous GET operation.
2341 *
2342 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
2343 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 2344 * the target node in the REPLY has been written to the local MD.
2345 *
2346 * On the target node, an LNET_EVENT_GET is logged when the GET request
2347 * arrives and is accepted into a MD.
2348 *
2349 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
2350 * \param mdh A handle for the MD that describes the memory into which the
ae4003f0
LN
2351 * requested data will be received. The MD must be "free floating"
2352 * (See LNetMDBind()).
d7e09d03
PT
2353 *
2354 * \retval 0 Success, and only in this case events will be generated
2355 * and logged to EQ (if it exists) of the MD.
2356 * \retval -EIO Simulated failure.
2357 * \retval -ENOMEM Memory allocation failure.
2358 * \retval -ENOENT Invalid MD object.
2359 */
2360int
2361LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
2362 lnet_process_id_t target, unsigned int portal,
2363 __u64 match_bits, unsigned int offset)
2364{
7e7ab095
MS
2365 struct lnet_msg *msg;
2366 struct lnet_libmd *md;
2367 int cpt;
2368 int rc;
d7e09d03 2369
af66a6e2
LN
2370 LASSERT(the_lnet.ln_init);
2371 LASSERT(the_lnet.ln_refcount > 0);
d7e09d03 2372
af66a6e2 2373 if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
9b79ca85 2374 fail_peer(target.nid, 1)) { /* shall we now? */
d7e09d03
PT
2375 CERROR("Dropping GET to %s: simulated failure\n",
2376 libcfs_id2str(target));
2377 return -EIO;
2378 }
2379
2380 msg = lnet_msg_alloc();
06ace26e 2381 if (!msg) {
d7e09d03
PT
2382 CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
2383 libcfs_id2str(target));
2384 return -ENOMEM;
2385 }
2386
2387 cpt = lnet_cpt_of_cookie(mdh.cookie);
2388 lnet_res_lock(cpt);
2389
2390 md = lnet_handle2md(&mdh);
5fd88337 2391 if (!md || !md->md_threshold || md->md_me) {
b0f5aad5 2392 CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
d7e09d03 2393 match_bits, portal, libcfs_id2str(target),
06ace26e
JS
2394 !md ? -1 : md->md_threshold);
2395 if (md && md->md_me)
d7e09d03
PT
2396 CERROR("REPLY MD also attached to portal %d\n",
2397 md->md_me->me_portal);
2398
2399 lnet_res_unlock(cpt);
2400
2401 lnet_msg_free(msg);
d7e09d03
PT
2402 return -ENOENT;
2403 }
2404
2405 CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
2406
2407 lnet_msg_attach_md(msg, md, 0, 0);
2408
2409 lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
2410
2411 msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
2412 msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
2413 msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
2414 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
2415
2416 /* NB handles only looked up by creator (no flips) */
2417 msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
2418 the_lnet.ln_interface_cookie;
2419 msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
2420 md->md_lh.lh_cookie;
2421
2422 lnet_res_unlock(cpt);
2423
2424 lnet_build_msg_event(msg, LNET_EVENT_SEND);
2425
2426 rc = lnet_send(self, msg, LNET_NID_ANY);
2427 if (rc < 0) {
af66a6e2 2428 CNETERR("Error sending GET to %s: %d\n",
c314c319 2429 libcfs_id2str(target), rc);
af66a6e2 2430 lnet_finalize(NULL, msg, rc);
d7e09d03
PT
2431 }
2432
2433 /* completion will be signalled by an event */
2434 return 0;
2435}
2436EXPORT_SYMBOL(LNetGet);
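/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal GET into a free-floating sink MD, mirroring the PUT sketch
 * above. All names and constants are hypothetical.
 */
#if 0
static char example_sink[256];

static int example_get(lnet_process_id_t target, lnet_handle_eq_t eq)
{
	lnet_md_t md = { 0 };
	lnet_handle_md_t mdh;
	int rc;

	md.start = example_sink;
	md.length = sizeof(example_sink);
	md.threshold = 2;		/* one SEND + one REPLY event */
	md.eq_handle = eq;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc)
		return rc;

	/* portal 4, match bits 0x1ULL, offset 0: arbitrary examples */
	return LNetGet(LNET_NID_ANY, mdh, target, 4, 0x1ULL, 0);
}
#endif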
2437
2438/**
2439 * Calculate distance to node at \a dstnid.
2440 *
2441 * \param dstnid Target NID.
2442 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
2443 * is saved here.
2444 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
2445 * here.
2446 *
 2447 * \retval 0 If \a dstnid belongs to a local interface and the reserved
 2448 * option local_nid_dist_zero is set (the default).
 2449 * \retval positive Distance to the target NID, i.e. the number of hops plus one.
2450 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
2451 */
2452int
2453LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
2454{
7e7ab095
MS
2455 struct list_head *e;
2456 struct lnet_ni *ni;
2457 lnet_remotenet_t *rnet;
2458 __u32 dstnet = LNET_NIDNET(dstnid);
2459 int hops;
2460 int cpt;
2461 __u32 order = 2;
2462 struct list_head *rn_list;
d7e09d03 2463
4420cfd3
JS
2464 /*
2465 * if !local_nid_dist_zero, I don't return a distance of 0 ever
d7e09d03
PT
2466 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
2467 * keep order 0 free for 0@lo and order 1 free for a local NID
4420cfd3
JS
2468 * match
2469 */
af66a6e2
LN
2470 LASSERT(the_lnet.ln_init);
2471 LASSERT(the_lnet.ln_refcount > 0);
d7e09d03
PT
2472
2473 cpt = lnet_net_lock_current();
2474
af66a6e2 2475 list_for_each(e, &the_lnet.ln_nis) {
d7e09d03
PT
2476 ni = list_entry(e, lnet_ni_t, ni_list);
2477
2478 if (ni->ni_nid == dstnid) {
06ace26e 2479 if (srcnidp)
d7e09d03 2480 *srcnidp = dstnid;
06ace26e 2481 if (orderp) {
d7e09d03
PT
2482 if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
2483 *orderp = 0;
2484 else
2485 *orderp = 1;
2486 }
2487 lnet_net_unlock(cpt);
2488
2489 return local_nid_dist_zero ? 0 : 1;
2490 }
2491
2492 if (LNET_NIDNET(ni->ni_nid) == dstnet) {
06ace26e 2493 if (srcnidp)
d7e09d03 2494 *srcnidp = ni->ni_nid;
06ace26e 2495 if (orderp)
d7e09d03
PT
2496 *orderp = order;
2497 lnet_net_unlock(cpt);
2498 return 1;
2499 }
2500
2501 order++;
2502 }
2503
2504 rn_list = lnet_net2rnethash(dstnet);
2505 list_for_each(e, rn_list) {
2506 rnet = list_entry(e, lnet_remotenet_t, lrn_list);
2507
2508 if (rnet->lrn_net == dstnet) {
2509 lnet_route_t *route;
2510 lnet_route_t *shortest = NULL;
2511
af66a6e2 2512 LASSERT(!list_empty(&rnet->lrn_routes));
d7e09d03
PT
2513
2514 list_for_each_entry(route, &rnet->lrn_routes,
c314c319 2515 lr_list) {
06ace26e 2516 if (!shortest ||
d7e09d03
PT
2517 route->lr_hops < shortest->lr_hops)
2518 shortest = route;
2519 }
2520
06ace26e 2521 LASSERT(shortest);
d7e09d03 2522 hops = shortest->lr_hops;
06ace26e 2523 if (srcnidp)
d7e09d03 2524 *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
06ace26e 2525 if (orderp)
d7e09d03
PT
2526 *orderp = order;
2527 lnet_net_unlock(cpt);
2528 return hops + 1;
2529 }
2530 order++;
2531 }
2532
2533 lnet_net_unlock(cpt);
2534 return -EHOSTUNREACH;
2535}
2536EXPORT_SYMBOL(LNetDist);
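/*
 * Usage sketch (illustrative only, not part of the original file):
 * interpreting LNetDist()'s three-way result. example_dist() is
 * hypothetical.
 */
#if 0
static void example_dist(lnet_nid_t dst)
{
	lnet_nid_t src;
	__u32 order;
	int dist = LNetDist(dst, &src, &order);

	if (dist < 0)			/* -EHOSTUNREACH */
		CDEBUG(D_NET, "%s: unreachable\n", libcfs_nid2str(dst));
	else if (dist == 0)		/* local NID, local_nid_dist_zero set */
		CDEBUG(D_NET, "%s: local\n", libcfs_nid2str(dst));
	else				/* hops + 1, reached via 'src' */
		CDEBUG(D_NET, "%s: distance %d via %s (order %u)\n",
		       libcfs_nid2str(dst), dist, libcfs_nid2str(src),
		       order);
}
#endif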