net/rds/connection.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

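/* Hash the address pair into one of the RDS_CONNECTION_HASH_ENTRIES buckets. */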
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	/* Pass NULL, don't need struct net for hash */
	unsigned long hash = inet_ehashfn(NULL,
					  be32_to_cpu(laddr), 0,
					  be32_to_cpu(faddr), 0);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future. It is up to
 * the transport to have serialized this call with its send and recv.
 */
void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time. They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created. They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	spin_lock_init(&conn->c_send_lock);
	atomic_set(&conn->c_send_generation, 1);
	atomic_set(&conn->c_senders, 0);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback. If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	if (rds_trans_get_preferred(faddr)) {
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

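/*
 * Wrappers around __rds_conn_create(). They differ only in the is_outgoing
 * flag, which controls the passive-loopback handling and the preference for
 * the loopback transport above.
 */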
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

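/*
 * Bring a connection down to RDS_CONN_DOWN: quiesce the CM handlers, wait
 * for in-flight senders to drain, let the transport shut down and reset the
 * send state. If the conn is still hashed afterwards, queue a reconnect.
 */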
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		/* verify everybody's out of rds_send_xmit() */
		spin_lock_irq(&conn->c_send_lock);
		spin_unlock_irq(&conn->c_send_lock);

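		/* Wait for any senders still accounted for in c_senders to
		 * finish before asking the transport to tear things down. */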
		while (atomic_read(&conn->c_senders)) {
			schedule_timeout(1);
			spin_lock_irq(&conn->c_send_lock);
			spin_unlock_irq(&conn->c_send_lock);
		}

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				       "%s: failed to transition to state DOWN, "
				       "current state is %d\n",
				       __func__,
				       atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);

	synchronize_rcu();

	rds_conn_shutdown(conn);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	rds_conn_count--;
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

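/*
 * Walk every hashed connection and copy rds_info_message entries for either
 * its send queue or its retransmit queue, depending on want_send. Backs the
 * RDS_INFO_SEND_MESSAGES and RDS_INFO_RETRANS_MESSAGES info sources.
 */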
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock(&conn->c_lock);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock(&conn->c_lock);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

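/*
 * Run 'visitor' against every hashed connection, copying item_len bytes per
 * connection into the info iterator while the caller's buffer has room;
 * lens->nr still counts every connection so the caller can resize and retry.
 */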
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct hlist_node *pos;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

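/* Fill a struct rds_info_connection for one conn; returning 1 keeps it in the dump. */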
static int rds_conn_info_visitor(struct rds_connection *conn,
				 void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags,
			  spin_is_locked(&conn->c_send_lock), SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

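/* Dump a snapshot of every connection for the RDS_INFO_CONNECTIONS source. */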
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
			       rds_conn_info_visitor,
			       sizeof(struct rds_info_connection));
}

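/* Module init: create the connection slab and register the info sources. */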
int __init rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

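/*
 * Module teardown: by this point all connections should have been destroyed,
 * so the connection hash is expected to be empty (hence the WARN_ON below).
 */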
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}