/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

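/* Hash a connection's local/foreign address pair into one of the
 * RDS_CONNECTION_HASH_ENTRIES buckets. rds_hash_secret is initialized
 * lazily on first use, so bucket placement is randomized per boot.
 */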
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future. It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

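/* Initialize a single path of a connection: its lock, send/retransmit
 * queues, work structs and the RDS_CONN_DOWN starting state. Called
 * once per path when the owning rds_connection is created.
 */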
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time. A conn contains messages to be retransmitted and
 * so spans the lifetime of the actual underlying transport connections.
 *
 * For now conns are not garbage collected once they're created. They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp, int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback. If *any* RDS sockets
	 * can bind to the destination address then we'd rather have messages
	 * flow through loopback than over either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we roll back and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resources for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

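/* Public constructors wrapping __rds_conn_create(). They differ only
 * in the is_outgoing flag, which matters for looped-back IB
 * connections and for transports that prefer to hand outgoing
 * loopback connections to the loopback transport (as TCP does).
 */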
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

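/* Shut down a single connection path: transition it out of UP (or
 * ERROR) into DISCONNECTING, wait for in-flight send and recv work to
 * drain, let the transport tear down its state, then reset
 * partial-message state. If the connection is still hashed, a
 * reconnect is queued afterwards.
 */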
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of
			 * tearing down the connection, and someone unloads
			 * the rds module. Quite reproducible with loopback
			 * connections. Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state_change() on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * here.
			 */
			rds_conn_path_error(cp,
					    "%s: failed to transition to state DOWN, current state is %d\n",
					    __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances. It assumes that
 * once the conn has been shut down, no one else is referencing the
 * connection. We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rdsdebug("freeing conn %p for %pI4 -> %pI4\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	conn->c_destroy_in_prog = 1;
	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < npaths; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	put_net(conn->c_net);
	kfree(conn->c_path);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

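/* Dump either the send queue or the retransmit queue of every path of
 * every connection into an info iterator. 'len' bounds how many
 * rds_info_message entries are copied out, but all entries are counted
 * so userspace can resize its buffer and retry.
 */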
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			npaths = (conn->c_trans->t_mp_capable ?
				  RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						rds_inc_info_copy(&rm->m_inc,
								  iter,
								  conn->c_laddr,
								  conn->c_faddr,
								  0);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

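/* Walk every connection in the hash and apply 'visitor' to it, copying
 * item_len bytes of visitor output per connection into the info
 * iterator for as long as they fit in 'len'. Exported for use by
 * transports that dump per-connection state.
 */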
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

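/* Per-path variant of rds_for_each_conn_info(): applies 'visitor' to
 * every path of every connection, copying and counting one item per
 * path for which the visitor returns nonzero.
 */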
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    size_t item_len)
{
	u64 buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;
	int j;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			npaths = (conn->c_trans->t_mp_capable ?
				  RDS_MPATH_WORKERS : 1);
			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];

				/* XXX no cp_lock usage.. */
				if (!visitor(cp, buffer))
					continue;

				/* We copy as much as we can fit in the buffer,
				 * but we count all items so that the caller
				 * can resize the buffer.
				 */
				if (len >= item_len) {
					rds_info_copy(iter, buffer, item_len);
					len -= item_len;
				}
				lens->nr++;
			}
		}
	}
	rcu_read_unlock();
}

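/* Fill a struct rds_info_connection from one connection path for the
 * RDS_INFO_CONNECTIONS dump. Always returns 1 so every path is
 * reported.
 */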
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = cp->cp_conn->c_laddr;
	cinfo->faddr = cp->cp_conn->c_faddr;
	strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

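/* RDS_INFO_CONNECTIONS handler: one rds_info_connection per path. */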
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

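/* Module init: create the connection slab and register the info
 * handlers. Returns -ENOMEM if the slab cannot be created.
 */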
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

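/* Module exit: free any remaining loopback connections, warn if a
 * connection is still hashed, then destroy the slab and unregister the
 * info handlers.
 */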
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

	if (!destroy && cp->cp_conn->c_destroy_in_prog)
		return;

	queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

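/* Single-path wrapper around rds_conn_path_drop() for transports that
 * are not multipath-capable.
 */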
void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may, however, have
 * scheduled a delayed reconnect - in that case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

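/* Log a connection error and force the path down. Callers normally go
 * through the rds_conn_path_error() wrapper in rds.h, which prepends
 * log-level and subsystem context to 'fmt'.
 */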
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}