/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "socklnd.h"

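/*
 * tx descriptor allocation: NOOP (ZC-ACK) txs are a fixed size and are
 * recycled through ksnd_idle_noop_txs to avoid allocator traffic; all
 * other txs are allocated and freed per message (see ksocknal_free_tx()).
 */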
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
	ksock_tx_t *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					ksock_tx_t, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}

ksock_tx_t *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	ksock_tx_t *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}

void
ksocknal_free_tx(ksock_tx_t *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}

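/*
 * The send/recv helpers below advance the tx/rx iov and kiov cursors by
 * however many bytes the socket actually accepted or delivered, so a
 * partially-transferred fragment resumes at the correct offset on the
 * next call.
 */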
int
ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
	struct iovec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob != 0);

	return rc;
}

int
ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
	lnet_kiov_t *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov == 0);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return rc;
		}

		nob -= (int)kiov->kiov_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}

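/*
 * Push one tx until it is fully queued on the socket: returns 0 when
 * tx_resid reaches zero, -EAGAIN if the socket backed up, -ENOMEM under
 * memory pressure, or another negative errno on a hard error.
 */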
int
ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0) {
		cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT(tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			rc = ksocknal_send_iov(conn, tx);
		} else {
			rc = ksocknal_send_kiov(conn, tx);
		}

		bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
		if (rc > 0) /* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline =
				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
			conn->ksnc_tx_bufnob = bufnob;
			mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}

int
ksocknal_recv_iov(ksock_conn_t *conn)
{
	struct iovec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}

int
ksocknal_recv_kiov(ksock_conn_t *conn)
{
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_nkiov > 0);

	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_kiov */
	rc = ksocknal_lib_recv_kiov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return -EAGAIN;
		}

		nob -= kiov->kiov_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return 1;
}

int
ksocknal_receive(ksock_conn_t *conn)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0) {
		cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov(conn);
		else
			rc = ksocknal_recv_kiov(conn);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}

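/*
 * ksocknal_tx_done() drops the tx's connection ref, recycles or frees
 * the descriptor, and finalizes the LNet message (if any) with 0 on a
 * clean send or -EIO if bytes were left unsent or a zero-copy send was
 * aborted.
 */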
void
ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
{
	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	if (ni == NULL && tx->tx_conn != NULL)
		ni = tx->tx_conn->ksnc_peer->ksnp_ni;

	ksocknal_free_tx(tx);
	if (lnetmsg != NULL) /* KSOCK_MSG_NOOPs go without lnetmsg */
		lnet_finalize(ni, lnetmsg, rc);
}

void
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
	ksock_tx_t *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, ksock_tx_t, tx_list);

		if (error && tx->tx_lnetmsg != NULL) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx);
	}
}

static void
ksocknal_check_zc_req(ksock_tx_t *tx)
{
	ksock_conn_t *conn = tx->tx_conn;
	ksock_peer_t *peer = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */

	ksocknal_tx_addref(tx);

	spin_lock(&peer->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer */
	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

	if (peer->ksnp_zc_next_cookie == 0)
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

	spin_unlock(&peer->ksnp_lock);
}

static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
	ksock_peer_t *peer = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer->ksnp_lock);

	if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
		/* Not waiting for an ACK */
		spin_unlock(&peer->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer->ksnp_lock);

	ksocknal_tx_decref(tx);
}

int
ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
	int rc;

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT(rc == 0);

		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++; /* exponential backoff warnings */
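		/* (counter & -counter) == counter only when counter is a
		 * power of two, so the warning fires on the 1st, 2nd, 4th,
		 * 8th ... occurrence */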
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%u allocated)\n",
			      counter, conn, atomic_read(&libcfs_kmemory));

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
						   SOCKNAL_ENOMEM_RETRY),
				      ksocknal_data.ksnd_reaper_waketime))
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
		return rc;
	}

	/* Actual error */
	LASSERT(rc < 0);

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection "
				      "while we were sending data; it may have "
				      "rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error "
				      "while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s"
		       " ip %pI4h:%d\n", conn, rc,
		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr,
		       conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn,
					 (conn->ksnc_closing) ? 0 : rc);

	return rc;
}

void
ksocknal_launch_connection_locked(ksock_route_t *route)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1; /* scheduling conn for connd */
	ksocknal_route_addref(route); /* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}

void
ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
{
	ksock_route_t *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer);
		if (route == NULL)
			return;

		ksocknal_launch_connection_locked(route);
	}
}

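/*
 * Select the connection to send a tx on: the protocol's pro_match_tx()
 * sorts each candidate into "typed" (exact match) or "fallback" (may
 * carry it), and within each class the least-backlogged connection
 * wins, with ties broken round-robin on last-post time when the
 * ksnd_round_robin tunable is set.
 */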
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
	struct list_head *tmp;
	ksock_conn_t *conn;
	ksock_conn_t *typed = NULL;
	ksock_conn_t *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer->ksnp_conns) {
		ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  cfs_sock_wmem_queued(c->ksnc_sock);
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto != NULL &&
			c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = cfs_time_current();

	return conn;
}

void
ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}

void
ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
{
	ksock_sched_t *sched = conn->ksnc_scheduler;
	ksock_msg_t *msg = &tx->tx_msg;
	ksock_tx_t *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr,
	       conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header. */
	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
					      KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	/*
	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
	 * but they're used inside spinlocks a lot.
	 */
	bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
		conn->ksnc_tx_bufnob = 0;
		mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is a noop ZC ACK; try to piggyback the
		 * ack_cookie on a normal packet so I don't need to send it */
		LASSERT(msg->ksm_zc_cookies[1] != 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */

	} else {
		/* It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already? */
		LASSERT(msg->ksm_zc_cookies[1] == 0);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready && /* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}

ksock_route_t *
ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
{
	cfs_time_t now = cfs_time_current();
	struct list_head *tmp;
	ksock_route_t *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled) /* connections being established */
			continue;

		/* all route types connected ? */
		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		if (!(route->ksnr_retry_interval == 0 || /* first attempt */
		      cfs_time_aftereq(now, route->ksnr_timeout))) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h "
			       "(cnted %d, interval %ld, %ld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
			continue;
		}

		return route;
	}

	return NULL;
}

ksock_route_t *
ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
{
	struct list_head *tmp;
	ksock_route_t *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, ksock_route_t, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}

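/*
 * Route a tx towards a peer: try the fast path (an established
 * connection) under the read lock first, then retake the global lock as
 * a writer to create the peer and/or launch connection attempts; while
 * a connection is still being established the tx is parked on the
 * peer's ksnp_tx_queue.
 */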
int
ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
	ksock_peer_t *peer;
	ksock_conn_t *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL) {
			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection... */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to "
			       "userspace process %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer);

	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer) != NULL) {
		/* the message is going to be pinned to the peer */
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
}

int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	int mpflag = 1;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct iovec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	ksock_tx_t *tx;
	int desc_size;
	int rc;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it... */

	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(payload_nob == 0 || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
	LASSERT(!in_interrupt());

	if (payload_iov != NULL)
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov != NULL) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
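	/* mpflag can only be 0 if this was a vmflush message and (assuming
	 * cfs_memory_pressure_get_and_set() returns the previous pressure
	 * state) the flag was clear before we set it above, which is
	 * exactly the case where it must be restored */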
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (rc == 0)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
}

int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;
}

void
ksocknal_thread_fini(void)
{
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}

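/*
 * Arm conn->ksnc_rx_* for the next incoming packet header (returns 1),
 * or, when nob_to_skip is non-zero, to discard that much slop through a
 * static scratch buffer (returns 0; we get called again if the slop
 * outruns the available iov entries).
 */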
int
ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];

	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) { /* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		mb(); /* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare lnet_hdr_t */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);

			conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again */

	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = MIN(nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;

	} while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}

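/*
 * Per-connection receive state machine: KSM_HEADER -> LNET_HEADER ->
 * PARSE (stalled until ksocknal_recv() supplies payload buffers) ->
 * LNET_PAYLOAD -> SLOP, then back to the next packet boundary.
 */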
int
ksocknal_process_receive(ksock_conn_t *conn)
{
	lnet_hdr_t *lhdr;
	lnet_process_id_t *id;
	int rc;

	LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
			LASSERT(rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s"
				       " ip %pI4h:%d\n", conn,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s"
				       " ip %pI4h:%d\n",
				       conn, rc,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings(conn,
							 (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
					conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet(conn, 0);
			return 0; /* NOOP is done and just return */
		}

		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
		conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again; /* read lnet header now */

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn); /* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
			return 0; /* come back later */
		goto again; /* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG();
	return -EINVAL; /* keep gcc happy */
}

int
ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	      unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
	      unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	ksock_conn_t *conn = (ksock_conn_t *)private;
	ksock_sched_t *sched = conn->ksnc_scheduler;

	LASSERT(mlen <= rlen);
	LASSERT(niov <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	if (mlen == 0 || iov != NULL) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 niov, iov, offset, mlen);
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT(mlen ==
		lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT(conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
		LASSERT(conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}

static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
	int rc;

	spin_lock_bh(&sched->kss_lock);

	rc = (!ksocknal_data.ksnd_shuttingdown &&
	      list_empty(&sched->kss_rx_conns) &&
	      list_empty(&sched->kss_tx_conns));

	spin_unlock_bh(&sched->kss_lock);
	return rc;
}

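/*
 * Scheduler main loop: each pass progresses at most one rx conn and one
 * tx conn so neither direction starves the other, and after
 * SOCKNAL_RESCHED consecutive busy passes it drops the lock to yield
 * the CPU (or sleeps when there is nothing to do).
 */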
int ksocknal_scheduler(void *arg)
{
	struct ksock_sched_info *info;
	ksock_sched_t *sched;
	ksock_conn_t *conn;
	ksock_tx_t *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;

	info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

	cfs_block_allsigs();

	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc != 0) {
		CERROR("Can't set CPT affinity to %d: %d\n",
		       info->ksi_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		/* Ensure I progress everything semi-fairly */

		if (!list_empty(&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  ksock_conn_t, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail(&conn->ksnc_rx_list,
					      &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!list_empty(&sched->kss_tx_conns)) {
			LIST_HEAD(zlist);

			if (!list_empty(&sched->kss_zombie_noop_txs)) {
				list_add(&zlist,
					 &sched->kss_zombie_noop_txs);
				list_del_init(&sched->kss_zombie_noop_txs);
			}

			conn = list_entry(sched->kss_tx_conns.next,
					  ksock_conn_t, ksnc_tx_list);
			list_del(&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					ksock_tx_t, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete. Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty(&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail(&conn->ksnc_tx_list,
					      &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}
		if (!did_something || /* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) { /* wait for something to do */
				cfs_wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched), rc);
				LASSERT(rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_thread_fini();
	return 0;
}

/*
 * Add connection to kss_rx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_read_callback(ksock_conn_t *conn)
{
	ksock_sched_t *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) { /* not being progressed */
		list_add_tail(&conn->ksnc_rx_list,
			      &sched->kss_rx_conns);
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}
	spin_unlock_bh(&sched->kss_lock);
}

/*
 * Add connection to kss_tx_conns of scheduler
 * and wakeup the scheduler.
 */
void ksocknal_write_callback(ksock_conn_t *conn)
{
	ksock_sched_t *sched;

	sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled && /* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
		list_add_tail(&conn->ksnc_tx_list,
			      &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);

		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}

ksock_proto_t *
ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		if (version == KSOCK_PROTO_V2)
			return &ksocknal_protocol_v2x;

		if (version == KSOCK_PROTO_V3)
			return &ksocknal_protocol_v3x;

		return NULL;
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;

		CLASSERT(sizeof(lnet_magicversion_t) ==
			 offsetof(ksock_hello_msg_t, kshm_src_nid));

		if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}

int
ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
		    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	ksock_net_t *net = (ksock_net_t *)ni->ni_data;

	LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT(conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}

int
ksocknal_invert_type(int type)
{
	switch (type) {
	case SOCKLND_CONN_ANY:
	case SOCKLND_CONN_CONTROL:
		return type;
	case SOCKLND_CONN_BULK_IN:
		return SOCKLND_CONN_BULK_OUT;
	case SOCKLND_CONN_BULK_OUT:
		return SOCKLND_CONN_BULK_IN;
	default:
		return SOCKLND_CONN_NONE;
	}
}

int
ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
		    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
		    __u64 *incarnation)
{
	/* Return < 0 fatal error
	 * 0 success
	 * EALREADY lost connection race
	 * EPROTO protocol version mismatch
	 */
	socket_t *sock = conn->ksnc_sock;
	int active = (conn->ksnc_proto != NULL);
	int timeout;
	int proto_match;
	int rc;
	ksock_proto_t *proto;
	lnet_process_id_t recv_id;

	/* socket type set on active connections - not set on passive */
	LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

	timeout = active ? *ksocknal_tunables.ksnd_timeout :
			   lnet_acceptor_timeout();

	rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	if (hello->kshm_magic != LNET_PROTO_MAGIC &&
	    hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
	    hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Bad magic(1) %#08x (%#08x expected) from "
		       "%pI4h\n", __cpu_to_le32(hello->kshm_magic),
		       LNET_PROTO_TCP_MAGIC,
		       &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	rc = libcfs_sock_read(sock, &hello->kshm_version,
			      sizeof(hello->kshm_version), timeout);
	if (rc != 0) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	proto = ksocknal_parse_proto_version(hello);
	if (proto == NULL) {
		if (!active) {
			/* unknown protocol from peer, tell peer my protocol */
			conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
			if (*ksocknal_tunables.ksnd_protocol == 2)
				conn->ksnc_proto = &ksocknal_protocol_v2x;
			else if (*ksocknal_tunables.ksnd_protocol == 1)
				conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
			hello->kshm_nips = 0;
			ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
		}

		CERROR("Unknown protocol version (%d.x expected)"
		       " from %pI4h\n",
		       conn->ksnc_proto->pro_version,
		       &conn->ksnc_ipaddr);

		return -EPROTO;
	}

	proto_match = (conn->ksnc_proto == proto);
	conn->ksnc_proto = proto;

	/* receive the rest of hello message anyway */
	rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
	if (rc != 0) {
		CERROR("Error %d reading or checking hello from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
		LASSERT(rc < 0);
		return rc;
	}

	*incarnation = hello->kshm_src_incarnation;

	if (hello->kshm_src_nid == LNET_NID_ANY) {
		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
		       "from %pI4h\n", &conn->ksnc_ipaddr);
		return -EPROTO;
	}

	if (!active &&
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
	}

	if (!active) {
		*peerid = recv_id;

		/* peer determines type */
		conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
		if (conn->ksnc_type == SOCKLND_CONN_NONE) {
			CERROR("Unexpected type %d from %s ip %pI4h\n",
			       hello->kshm_ctype, libcfs_id2str(*peerid),
			       &conn->ksnc_ipaddr);
			return -EPROTO;
		}

		return 0;
	}

	if (peerid->pid != recv_id.pid ||
	    peerid->nid != recv_id.nid) {
		LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
				   " %pI4h, but they claimed they were "
				   "%s; please check your Lustre "
				   "configuration.\n",
				   libcfs_id2str(*peerid),
				   &conn->ksnc_ipaddr,
				   libcfs_id2str(recv_id));
		return -EPROTO;
	}

	if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
		/* Possible protocol mismatch or I lost the connection race */
		return proto_match ? EALREADY : EPROTO;
	}

	if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
		CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
		       conn->ksnc_type, libcfs_id2str(*peerid),
		       &conn->ksnc_ipaddr,
		       hello->kshm_ctype);
		return -EPROTO;
	}

	return 0;
}

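/*
 * Drive an outgoing connection attempt on a route: keep establishing
 * whichever connection types are still wanted until done or the
 * deadline passes. Losing a connection race reschedules a retry after
 * min_reconnectms; hard failures back the retry interval off
 * exponentially, capped at max_reconnectms.
 */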
1838int
1839ksocknal_connect (ksock_route_t *route)
1840{
1841 LIST_HEAD (zombies);
1842 ksock_peer_t *peer = route->ksnr_peer;
1843 int type;
1844 int wanted;
1845 socket_t *sock;
1846 cfs_time_t deadline;
1847 int retry_later = 0;
1848 int rc = 0;
1849
1850 deadline = cfs_time_add(cfs_time_current(),
1851 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
1852
1853 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1854
1855 LASSERT (route->ksnr_scheduled);
1856 LASSERT (!route->ksnr_connecting);
1857
1858 route->ksnr_connecting = 1;
1859
1860 for (;;) {
1861 wanted = ksocknal_route_mask() & ~route->ksnr_connected;
1862
1863 /* stop connecting if peer/route got closed under me, or
1864 * route got connected while queued */
1865 if (peer->ksnp_closing || route->ksnr_deleted ||
1866 wanted == 0) {
1867 retry_later = 0;
1868 break;
1869 }
1870
1871 /* reschedule if peer is connecting to me */
1872 if (peer->ksnp_accepting > 0) {
1873 CDEBUG(D_NET,
1874 "peer %s(%d) already connecting to me, retry later.\n",
1875 libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
1876 retry_later = 1;
1877 }
1878
1879 if (retry_later) /* needs reschedule */
1880 break;
1881
1882 if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
1883 type = SOCKLND_CONN_ANY;
1884 } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
1885 type = SOCKLND_CONN_CONTROL;
1886 } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
1887 type = SOCKLND_CONN_BULK_IN;
1888 } else {
1889 LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
1890 type = SOCKLND_CONN_BULK_OUT;
1891 }
1892
1893 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1894
1895 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
1896 rc = -ETIMEDOUT;
1897 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1898 route->ksnr_ipaddr,
1899 route->ksnr_port);
1900 goto failed;
1901 }
1902
1903 rc = lnet_connect(&sock, peer->ksnp_id.nid,
1904 route->ksnr_myipaddr,
1905 route->ksnr_ipaddr, route->ksnr_port);
1906 if (rc != 0)
1907 goto failed;
1908
1909 rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
1910 if (rc < 0) {
1911 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1912 route->ksnr_ipaddr,
1913 route->ksnr_port);
1914 goto failed;
1915 }
1916
1917 /* A +ve RC means I have to retry because I lost the connection
1918 * race or I have to renegotiate protocol version */
1919 retry_later = (rc != 0);
1920 if (retry_later)
1921 CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
1922 libcfs_nid2str(peer->ksnp_id.nid));
1923
1924 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1925 }
1926
1927 route->ksnr_scheduled = 0;
1928 route->ksnr_connecting = 0;
1929
1930 if (retry_later) {
1931 /* re-queue for attention; this frees me up to handle
1932 * the peer's incoming connection request */
1933
1934 if (rc == EALREADY ||
1935 (rc == 0 && peer->ksnp_accepting > 0)) {
1936 /* We want to introduce a delay before next
1937 * attempt to connect if we lost conn race,
1938 * but the race is resolved quickly usually,
1939 * so min_reconnectms should be good heuristic */
1940 route->ksnr_retry_interval =
1941 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
1942 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1943 route->ksnr_retry_interval);
1944 }
1945
1946 ksocknal_launch_connection_locked(route);
1947 }
1948
1949 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1950 return retry_later;
1951
1952 failed:
1953 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1954
1955 route->ksnr_scheduled = 0;
1956 route->ksnr_connecting = 0;
1957
1958 /* This is a retry rather than a new connection */
1959 route->ksnr_retry_interval *= 2;
1960 route->ksnr_retry_interval =
1961 MAX(route->ksnr_retry_interval,
1962 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
1963 route->ksnr_retry_interval =
1964 MIN(route->ksnr_retry_interval,
1965 cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
1966
1967 LASSERT (route->ksnr_retry_interval != 0);
1968 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1969 route->ksnr_retry_interval);
1970
1971 if (!list_empty(&peer->ksnp_tx_queue) &&
1972 peer->ksnp_accepting == 0 &&
1973 ksocknal_find_connecting_route_locked(peer) == NULL) {
1974 ksock_conn_t *conn;
1975
1976 /* ksnp_tx_queue is queued on a conn on successful
1977 * connection for V1.x and V2.x */
1978 if (!list_empty (&peer->ksnp_conns)) {
1979 conn = list_entry(peer->ksnp_conns.next,
1980 ksock_conn_t, ksnc_list);
1981 LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
1982 }
1983
1984 /* take all the blocked packets while I've got the lock and
1985 * complete below... */
1986 list_splice_init(&peer->ksnp_tx_queue, &zombies);
1987 }
1988
1989#if 0 /* irrelevant with only eager routes */
1990 if (!route->ksnr_deleted) {
1991 /* make this route least-favourite for re-selection */
1992 list_del(&route->ksnr_list);
1993 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
1994 }
1995#endif
1996 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1997
1998 ksocknal_peer_failed(peer);
1999 ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
2000 return 0;
2001}
2002
2003/*
2004 * Check whether we need to create more connds.
2005 * It will try to create a new thread if necessary; @timeout can be
2006 * updated if thread creation fails, so the caller won't keep retrying
2007 * while short of resources.
2008 */
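/* NB: returns 1 iff it dropped (and re-took) ksnd_connd_lock while
 * attempting to start a thread, regardless of whether the start
 * succeeded; 0 means the lock was never released */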
2009static int
2010ksocknal_connd_check_start(long sec, long *timeout)
2011{
2012 char name[16];
2013 int rc;
2014 int total = ksocknal_data.ksnd_connd_starting +
2015 ksocknal_data.ksnd_connd_running;
2016
2017 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2018 /* still initializing */
2019 return 0;
2020 }
2021
2022 if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
2023 total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
2024 /* can't create more connds, or we still have enough
2025 * threads to handle the connecting load */
2026 return 0;
2027 }
2028
2029 if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
2030 /* no pending connecting request */
2031 return 0;
2032 }
2033
2034 if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
2035 /* may be out of resources; retry later */
2036 *timeout = cfs_time_seconds(1);
2037 return 0;
2038 }
2039
2040 if (ksocknal_data.ksnd_connd_starting > 0) {
2041 /* serialize thread startup to avoid a flood */
2042 return 0;
2043 }
2044
2045 ksocknal_data.ksnd_connd_starting_stamp = sec;
2046 ksocknal_data.ksnd_connd_starting++;
2047 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2048
2049 /* NB: total is the next id */
2050 snprintf(name, sizeof(name), "socknal_cd%02d", total);
2051 rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
2052
2053 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2054 if (rc == 0)
2055 return 1;
2056
2057 /* we tried ... */
2058 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2059 ksocknal_data.ksnd_connd_starting--;
2060 ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
2061
2062 return 1;
2063}
2064
2065/*
2066 * Check whether the current thread can exit: it returns 1 if there are
2067 * too many threads and none were created in the past 120 seconds.
2068 * This function may also update @timeout to make the caller come back
2069 * later and recheck these conditions.
2070 */
2071static int
2072ksocknal_connd_check_stop(long sec, long *timeout)
2073{
2074 int val;
2075
2076 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2077 /* still initializing */
2078 return 0;
2079 }
2080
2081 if (ksocknal_data.ksnd_connd_starting > 0) {
2082 /* a new thread is being started */
2083 return 0;
2084 }
2085
2086 if (ksocknal_data.ksnd_connd_running <=
2087 *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
2088 return 0;
2089 }
2090
2091 /* was a thread created in the past 120 seconds? */
2092 val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
2093 SOCKNAL_CONND_TIMEOUT - sec);
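 /* val > 0 means a thread was created within the past
 * SOCKNAL_CONND_TIMEOUT (120) seconds, so don't exit yet */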
2094
2095 *timeout = (val > 0) ? cfs_time_seconds(val) :
2096 cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
2097 if (val > 0)
2098 return 0;
2099
2100 /* no thread created in the past 120 seconds */
2101
2102 return ksocknal_data.ksnd_connd_running >
2103 ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
2104}
2105
2106/* Go through the connd_routes queue looking for a route that we can
2107 * process right now; @timeout_p can be updated if we need to come back later */
2108static ksock_route_t *
2109ksocknal_connd_get_route_locked(signed long *timeout_p)
2110{
2111 ksock_route_t *route;
2112 cfs_time_t now;
2113
2114 now = cfs_time_current();
2115
2116 /* connd_routes can contain both pending and ordinary routes */
2117 list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
2118 ksnr_connd_list) {
2119
2120 if (route->ksnr_retry_interval == 0 ||
2121 cfs_time_aftereq(now, route->ksnr_timeout))
2122 return route;
2123
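 /* route not ready yet: shrink *timeout_p towards its retry
 * deadline so the connd wakes up just in time to handle it */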
2124 if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
2125 (int)*timeout_p > (int)(route->ksnr_timeout - now))
2126 *timeout_p = (int)(route->ksnr_timeout - now);
2127 }
2128
2129 return NULL;
2130}
2131
2132int
2133ksocknal_connd (void *arg)
2134{
2135 spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
2136 ksock_connreq_t *cr;
2137 wait_queue_t wait;
2138 int nloops = 0;
2139 int cons_retry = 0;
2140
2141 cfs_block_allsigs ();
2142
2143 init_waitqueue_entry_current (&wait);
2144
2145 spin_lock_bh(connd_lock);
2146
2147 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2148 ksocknal_data.ksnd_connd_starting--;
2149 ksocknal_data.ksnd_connd_running++;
2150
2151 while (!ksocknal_data.ksnd_shuttingdown) {
2152 ksock_route_t *route = NULL;
2153 long sec = cfs_time_current_sec();
2154 long timeout = MAX_SCHEDULE_TIMEOUT;
2155 int dropped_lock = 0;
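 /* dropped_lock tracks whether connd_lock was released this
 * iteration; if it was, poll again instead of sleeping */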
2156
2157 if (ksocknal_connd_check_stop(sec, &timeout)) {
2158 /* wakeup another one to check stop */
2159 wake_up(&ksocknal_data.ksnd_connd_waitq);
2160 break;
2161 }
2162
2163 if (ksocknal_connd_check_start(sec, &timeout)) {
2164 /* created new thread */
2165 dropped_lock = 1;
2166 }
2167
2168 if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
2169 /* Connection accepted by the listener */
2170 cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
2171 next, ksock_connreq_t, ksncr_list);
2172
2173 list_del(&cr->ksncr_list);
2174 spin_unlock_bh(connd_lock);
2175 dropped_lock = 1;
2176
2177 ksocknal_create_conn(cr->ksncr_ni, NULL,
2178 cr->ksncr_sock, SOCKLND_CONN_NONE);
2179 lnet_ni_decref(cr->ksncr_ni);
2180 LIBCFS_FREE(cr, sizeof(*cr));
2181
2182 spin_lock_bh(connd_lock);
2183 }
2184
2185 /* Only handle an outgoing connection request if there
2186 * is a thread left to handle incoming connections and
2187 * to create a new connd */
2188 if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
2189 ksocknal_data.ksnd_connd_running) {
2190 route = ksocknal_connd_get_route_locked(&timeout);
2191 }
2192 if (route != NULL) {
2193 list_del (&route->ksnr_connd_list);
2194 ksocknal_data.ksnd_connd_connecting++;
2195 spin_unlock_bh(connd_lock);
2196 dropped_lock = 1;
2197
2198 if (ksocknal_connect(route)) {
2199 /* consecutive retry */
2200 if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
2201 CWARN("massive consecutive "
2202 "re-connecting to %pI4h\n",
2203 &route->ksnr_ipaddr);
2204 cons_retry = 0;
2205 }
2206 } else {
2207 cons_retry = 0;
2208 }
2209
2210 ksocknal_route_decref(route);
2211
2212 spin_lock_bh(connd_lock);
2213 ksocknal_data.ksnd_connd_connecting--;
2214 }
2215
2216 if (dropped_lock) {
2217 if (++nloops < SOCKNAL_RESCHED)
2218 continue;
2219 spin_unlock_bh(connd_lock);
2220 nloops = 0;
2221 cond_resched();
2222 spin_lock_bh(connd_lock);
2223 continue;
2224 }
2225
2226 /* Nothing to do for 'timeout' */
2227 set_current_state(TASK_INTERRUPTIBLE);
2228 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
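 /* exclusive wait: each wake_up rouses only one connd,
 * avoiding a thundering herd when work arrives */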
2229 spin_unlock_bh(connd_lock);
2230
2231 nloops = 0;
2232 waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
2233
2234 set_current_state(TASK_RUNNING);
2235 remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
2236 spin_lock_bh(connd_lock);
2237 }
2238 ksocknal_data.ksnd_connd_running--;
2239 spin_unlock_bh(connd_lock);
2240
2241 ksocknal_thread_fini();
2242 return 0;
2243}
2244
2245ksock_conn_t *
2246ksocknal_find_timed_out_conn (ksock_peer_t *peer)
2247{
2248 /* We're called with a shared lock on ksnd_global_lock */
2249 ksock_conn_t *conn;
2250 struct list_head *ctmp;
2251
2252 list_for_each (ctmp, &peer->ksnp_conns) {
2253 int error;
2254 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
2255
2256 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
2257 LASSERT (!conn->ksnc_closing);
2258
2259 /* SOCK_ERROR will reset the socket's error code on
2260 * some platforms (like Darwin 8.x) */
2261 error = cfs_sock_error(conn->ksnc_sock);
2262 if (error != 0) {
2263 ksocknal_conn_addref(conn);
2264
2265 switch (error) {
2266 case ECONNRESET:
2267 CNETERR("A connection with %s "
2268 "(%pI4h:%d) was reset; "
2269 "it may have rebooted.\n",
2270 libcfs_id2str(peer->ksnp_id),
2271 &conn->ksnc_ipaddr,
2272 conn->ksnc_port);
2273 break;
2274 case ETIMEDOUT:
2275 CNETERR("A connection with %s "
2276 "(%pI4h:%d) timed out; the "
2277 "network or node may be down.\n",
2278 libcfs_id2str(peer->ksnp_id),
2279 &conn->ksnc_ipaddr,
2280 conn->ksnc_port);
2281 break;
2282 default:
2283 CNETERR("An unexpected network error %d "
2284 "occurred with %s "
2285 "(%pI4h:%d)\n", error,
2286 libcfs_id2str(peer->ksnp_id),
2287 &conn->ksnc_ipaddr,
2288 conn->ksnc_port);
2289 break;
2290 }
2291
2292 return (conn);
2293 }
2294
2295 if (conn->ksnc_rx_started &&
2296 cfs_time_aftereq(cfs_time_current(),
2297 conn->ksnc_rx_deadline)) {
2298 /* Timed out incomplete incoming message */
2299 ksocknal_conn_addref(conn);
2300 CNETERR("Timeout receiving from %s (%pI4h:%d), "
2301 "state %d wanted %d left %d\n",
2302 libcfs_id2str(peer->ksnp_id),
2303 &conn->ksnc_ipaddr,
2304 conn->ksnc_port,
2305 conn->ksnc_rx_state,
2306 conn->ksnc_rx_nob_wanted,
2307 conn->ksnc_rx_nob_left);
2308 return (conn);
2309 }
2310
2311 if ((!list_empty(&conn->ksnc_tx_queue) ||
2312 cfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
2313 cfs_time_aftereq(cfs_time_current(),
2314 conn->ksnc_tx_deadline)) {
2315 /* Timed out messages queued for sending or
2316 * buffered in the socket's send buffer */
2317 ksocknal_conn_addref(conn);
2318 CNETERR("Timeout sending data to %s (%pI4h:%d); "
2319 "the network or that node may be down.\n",
2320 libcfs_id2str(peer->ksnp_id),
2321 &conn->ksnc_ipaddr,
2322 conn->ksnc_port);
2323 return (conn);
2324 }
2325 }
2326
2327 return (NULL);
2328}
2329
2330static inline void
2331ksocknal_flush_stale_txs(ksock_peer_t *peer)
2332{
2333 ksock_tx_t *tx;
2334 LIST_HEAD (stale_txs);
2335
2336 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2337
2338 while (!list_empty (&peer->ksnp_tx_queue)) {
2339 tx = list_entry (peer->ksnp_tx_queue.next,
2340 ksock_tx_t, tx_list);
2341
2342 if (!cfs_time_aftereq(cfs_time_current(),
2343 tx->tx_deadline))
2344 break;
2345
2346 list_del (&tx->tx_list);
2347 list_add_tail (&tx->tx_list, &stale_txs);
2348 }
2349
2350 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2351
2352 ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
2353}
2354
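/* Returns 1 if a keepalive was launched, 0 if none is needed, and a
 * negative errno on failure; NB the caller's shared ksnd_global_lock
 * is dropped and re-taken whenever a tx is allocated */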
2355int
2356ksocknal_send_keepalive_locked(ksock_peer_t *peer)
2357{
2358 ksock_sched_t *sched;
2359 ksock_conn_t *conn;
2360 ksock_tx_t *tx;
2361
2362 if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
2363 return 0;
2364
2365 if (peer->ksnp_proto != &ksocknal_protocol_v3x)
2366 return 0;
2367
2368 if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
2369 cfs_time_before(cfs_time_current(),
2370 cfs_time_add(peer->ksnp_last_alive,
2371 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
2372 return 0;
2373
2374 if (cfs_time_before(cfs_time_current(),
2375 peer->ksnp_send_keepalive))
2376 return 0;
2377
2378 /* retry 10 seconds later, so we won't put pressure
2379 * on this peer if we failed to send a keepalive this time */
2380 peer->ksnp_send_keepalive = cfs_time_shift(10);
2381
2382 conn = ksocknal_find_conn_locked(peer, NULL, 1);
2383 if (conn != NULL) {
2384 sched = conn->ksnc_scheduler;
2385
2386 spin_lock_bh(&sched->kss_lock);
2387 if (!list_empty(&conn->ksnc_tx_queue)) {
2388 spin_unlock_bh(&sched->kss_lock);
2389 /* there is a queued ACK; no keepalive needed */
2390 return 0;
2391 }
2392
2393 spin_unlock_bh(&sched->kss_lock);
2394 }
2395
2396 read_unlock(&ksocknal_data.ksnd_global_lock);
2397
2398 /* cookie = 1 is reserved for keepalive PING */
2399 tx = ksocknal_alloc_tx_noop(1, 1);
2400 if (tx == NULL) {
2401 read_lock(&ksocknal_data.ksnd_global_lock);
2402 return -ENOMEM;
2403 }
2404
2405 if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
2406 read_lock(&ksocknal_data.ksnd_global_lock);
2407 return 1;
2408 }
2409
2410 ksocknal_free_tx(tx);
2411 read_lock(&ksocknal_data.ksnd_global_lock);
2412
2413 return -EIO;
2414}
2415
2416
2417void
2418ksocknal_check_peer_timeouts (int idx)
2419{
2420 struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
2421 ksock_peer_t *peer;
2422 ksock_conn_t *conn;
2423 ksock_tx_t *tx;
2424
2425 again:
2426 /* NB. We expect to have a look at all the peers and not find any
2427 * connections to time out, so we just use a shared lock while we
2428 * take a look... */
2429 read_lock(&ksocknal_data.ksnd_global_lock);
2430
2431 list_for_each_entry(peer, peers, ksnp_list) {
2432 cfs_time_t deadline = 0;
2433 int resid = 0;
2434 int n = 0;
2435
2436 if (ksocknal_send_keepalive_locked(peer) != 0) {
2437 read_unlock(&ksocknal_data.ksnd_global_lock);
2438 goto again;
2439 }
2440
2441 conn = ksocknal_find_timed_out_conn (peer);
2442
2443 if (conn != NULL) {
2444 read_unlock(&ksocknal_data.ksnd_global_lock);
2445
2446 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2447
2448 /* NB we won't find this one again, but we can't
2449 * just proceed with the next peer, since we dropped
2450 * ksnd_global_lock and it might be dead already! */
2451 ksocknal_conn_decref(conn);
2452 goto again;
2453 }
2454
2455 /* we can't process stale txs right here because we're
2456 * holding only the shared lock */
2457 if (!list_empty (&peer->ksnp_tx_queue)) {
2458 ksock_tx_t *tx =
2459 list_entry (peer->ksnp_tx_queue.next,
2460 ksock_tx_t, tx_list);
2461
2462 if (cfs_time_aftereq(cfs_time_current(),
2463 tx->tx_deadline)) {
2464
2465 ksocknal_peer_addref(peer);
2466 read_unlock(&ksocknal_data.ksnd_global_lock);
2467
2468 ksocknal_flush_stale_txs(peer);
2469
2470 ksocknal_peer_decref(peer);
2471 goto again;
2472 }
2473 }
2474
2475 if (list_empty(&peer->ksnp_zc_req_list))
2476 continue;
2477
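 /* count zero-copy requests that have passed their deadline;
 * the list is kept in deadline order, so stop at the first
 * one that is still in time */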
2478 spin_lock(&peer->ksnp_lock);
2479 list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
2480 if (!cfs_time_aftereq(cfs_time_current(),
2481 tx->tx_deadline))
2482 break;
2483 /* ignore the TX if connection is being closed */
2484 if (tx->tx_conn->ksnc_closing)
2485 continue;
2486 n++;
2487 }
2488
2489 if (n == 0) {
2490 spin_unlock(&peer->ksnp_lock);
2491 continue;
2492 }
2493
2494 tx = list_entry(peer->ksnp_zc_req_list.next,
2495 ksock_tx_t, tx_zc_list);
2496 deadline = tx->tx_deadline;
2497 resid = tx->tx_resid;
2498 conn = tx->tx_conn;
2499 ksocknal_conn_addref(conn);
2500
2501 spin_unlock(&peer->ksnp_lock);
2502 read_unlock(&ksocknal_data.ksnd_global_lock);
2503
2504 CERROR("Total %d stale ZC_REQs for peer %s detected; the "
2505 "oldest (%p) timed out %ld secs ago, "
2506 "resid: %d, wmem: %d\n",
2507 n, libcfs_nid2str(peer->ksnp_id.nid), tx,
2508 cfs_duration_sec(cfs_time_current() - deadline),
2509 resid, cfs_sock_wmem_queued(conn->ksnc_sock));
2510
2511 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2512 ksocknal_conn_decref(conn);
2513 goto again;
2514 }
2515
2516 read_unlock(&ksocknal_data.ksnd_global_lock);
2517}
2518
2519int
2520ksocknal_reaper (void *arg)
2521{
2522 wait_queue_t wait;
2523 ksock_conn_t *conn;
2524 ksock_sched_t *sched;
2525 struct list_head enomem_conns;
2526 int nenomem_conns;
2527 cfs_duration_t timeout;
2528 int i;
2529 int peer_index = 0;
2530 cfs_time_t deadline = cfs_time_current();
2531
2532 cfs_block_allsigs ();
2533
2534 INIT_LIST_HEAD(&enomem_conns);
2535 init_waitqueue_entry_current (&wait);
2536
2537 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2538
2539 while (!ksocknal_data.ksnd_shuttingdown) {
2540
2541 if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
2542 conn = list_entry (ksocknal_data. \
2543 ksnd_deathrow_conns.next,
2544 ksock_conn_t, ksnc_list);
2545 list_del (&conn->ksnc_list);
2546
2547 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2548
2549 ksocknal_terminate_conn(conn);
2550 ksocknal_conn_decref(conn);
2551
2552 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2553 continue;
2554 }
2555
2556 if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
2557 conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
2558 next, ksock_conn_t, ksnc_list);
2559 list_del (&conn->ksnc_list);
2560
2561 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2562
2563 ksocknal_destroy_conn(conn);
2564
2565 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2566 continue;
2567 }
2568
2569 if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
2570 list_add(&enomem_conns,
2571 &ksocknal_data.ksnd_enomem_conns);
2572 list_del_init(&ksocknal_data.ksnd_enomem_conns);
2573 }
2574
2575 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2576
2577 /* reschedule all the connections that stalled with ENOMEM... */
2578 nenomem_conns = 0;
2579 while (!list_empty (&enomem_conns)) {
2580 conn = list_entry (enomem_conns.next,
2581 ksock_conn_t, ksnc_tx_list);
2582 list_del (&conn->ksnc_tx_list);
2583
2584 sched = conn->ksnc_scheduler;
2585
2586 spin_lock_bh(&sched->kss_lock);
2587
2588 LASSERT(conn->ksnc_tx_scheduled);
2589 conn->ksnc_tx_ready = 1;
2590 list_add_tail(&conn->ksnc_tx_list,
2591 &sched->kss_tx_conns);
2592 wake_up(&sched->kss_waitq);
2593
2594 spin_unlock_bh(&sched->kss_lock);
2595 nenomem_conns++;
2596 }
2597
2598 /* careful with the jiffy wrap... */
2599 while ((timeout = cfs_time_sub(deadline,
2600 cfs_time_current())) <= 0) {
2601 const int n = 4;
2602 const int p = 1;
2603 int chunk = ksocknal_data.ksnd_peer_hash_size;
2604
2605 /* Time to check for timeouts on a few more peers: I do
2606 * checks every 'p' seconds on a proportion of the peer
2607 * table and I need to check every connection 'n' times
2608 * within a timeout interval, to ensure I detect a
2609 * timeout on any connection within (n+1)/n times the
2610 * timeout interval. */
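 /* e.g. assuming ksnd_peer_hash_size == 251 and a 50s timeout:
 * chunk = 251 * 4 * 1 / 50 ≈ 20 buckets per pass, one pass per
 * second, so the whole table is covered about every 12.5s */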
2611
2612 if (*ksocknal_tunables.ksnd_timeout > n * p)
2613 chunk = (chunk * n * p) /
2614 *ksocknal_tunables.ksnd_timeout;
2615 if (chunk == 0)
2616 chunk = 1;
2617
2618 for (i = 0; i < chunk; i++) {
2619 ksocknal_check_peer_timeouts (peer_index);
2620 peer_index = (peer_index + 1) %
2621 ksocknal_data.ksnd_peer_hash_size;
2622 }
2623
2624 deadline = cfs_time_add(deadline, cfs_time_seconds(p));
2625 }
2626
2627 if (nenomem_conns != 0) {
2628 /* Reduce my timeout if I rescheduled ENOMEM conns.
2629 * This also prevents me from being woken immediately
2630 * if any go back on my enomem list. */
2631 timeout = SOCKNAL_ENOMEM_RETRY;
2632 }
2633 ksocknal_data.ksnd_reaper_waketime =
2634 cfs_time_add(cfs_time_current(), timeout);
2635
2636 set_current_state (TASK_INTERRUPTIBLE);
2637 add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
2638
2639 if (!ksocknal_data.ksnd_shuttingdown &&
2640 list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
2641 list_empty (&ksocknal_data.ksnd_zombie_conns))
2642 waitq_timedwait (&wait, TASK_INTERRUPTIBLE,
2643 timeout);
2644
2645 set_current_state (TASK_RUNNING);
2646 remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
2647
2648 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2649 }
2650
2651 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2652
2653 ksocknal_thread_fini();
2654 return 0;
2655}