/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds_single_path.h"
#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF  0
        {
                .procname       = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF  1
        {
                .procname       = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_rcvbuf,
        },
        { }
};
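
/* Note: the .data pointers above are deliberately left unset here;
 * rds_tcp_init_net() points them at the per-netns sndbuf_size/rcvbuf_size
 * fields before registering the (possibly kmemdup'd) table for that
 * namespace.
 */
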
/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
        mm_segment_t oldfs = get_fs();
        int val = 1;

        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
                              sizeof(val));
        set_fs(oldfs);
}
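
/* Thin accessors for the send sequence state of the underlying TCP socket.
 * The send path records snd_nxt when a message is queued and later compares
 * snd_una against it to decide how much the peer has acked; the
 * t_last_sent_nxt/t_last_expected_una values exported by rds_tcp_tc_info()
 * below are derived from these.
 */
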
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
        rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* from rds_tcp_set_callbacks() */
        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/* rds_tcp_reset_callbacks() switches to the new sock and
 * releases the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks()
 * and rds_tcp_reset_callbacks().  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
                             struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;
        struct socket *osock = tc->t_sock;

        if (!osock)
                goto newsock;

        /* Need to resolve a duelling SYN between peers.
         * We have an outstanding SYN to this peer, which may
         * potentially have transitioned to the RDS_CONN_UP state,
         * so we must quiesce any send threads before resetting
         * c_transport_data. We quiesce these threads by setting
         * c_state to something other than RDS_CONN_UP, and then
         * waiting for any existing threads in rds_send_xmit to
         * complete release_in_xmit(). (Subsequent threads entering
         * rds_send_xmit() will bail on !rds_conn_up().)
         *
         * However an incoming syn-ack at this point would end up
         * marking the conn as RDS_CONN_UP, and would again permit
         * rds_send_xmit() threads through, so ideally we would
         * synchronize on RDS_CONN_UP after lock_sock(), but cannot
         * do that: waiting on !RDS_IN_XMIT after lock_sock() may
         * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
         * would not get set. As a result, we set c_state to
         * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
         * cannot mark the conn up in the window before lock_sock().
         */
        atomic_set(&conn->c_state, RDS_CONN_RESETTING);
        wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
        lock_sock(osock->sk);
        /* reset receive side state for rds_tcp_data_recv() for osock */
        if (tc->t_tinc) {
                rds_inc_put(&tc->t_tinc->ti_inc);
                tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;
        tc->t_sock = NULL;

        write_lock_bh(&osock->sk->sk_callback_lock);

        osock->sk->sk_user_data = NULL;
        osock->sk->sk_data_ready = tc->t_orig_data_ready;
        osock->sk->sk_write_space = tc->t_orig_write_space;
        osock->sk->sk_state_change = tc->t_orig_state_change;
        write_unlock_bh(&osock->sk->sk_callback_lock);
        release_sock(osock->sk);
        sock_release(osock);
newsock:
        rds_send_path_reset(&conn->c_path[0]);
        lock_sock(sock->sk);
        write_lock_bh(&sock->sk->sk_callback_lock);
        tc->t_sock = sock;
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
        release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with the data path.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
        rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->conn = conn;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;
        struct sockaddr_in sin;
        int sinlen;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                sock = tc->t_sock;

                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
                tsinfo.local_addr = sin.sin_addr.s_addr;
                tsinfo.local_port = sin.sin_port;
                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
                tsinfo.peer_addr = sin.sin_addr.s_addr;
                tsinfo.peer_port = sin.sin_port;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
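
/* rds_tcp_tc_info() is the handler for the RDS_INFO_TCP_SOCKETS rds-info
 * request: rds_tcp_init() registers it with rds_info_register_func() and
 * rds_tcp_exit() deregisters it, so each call copies one snapshot entry per
 * tracked socket back to userspace.
 */
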
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
        if (inet_addr_type(net, addr) == RTN_LOCAL)
                return 0;
        return -EADDRNOTAVAIL;
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;

        tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
        if (!tc)
                return -ENOMEM;

        mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;

        conn->c_transport_data = tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        rdsdebug("alloced tc %p\n", conn->c_transport_data);
        return 0;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_splice(&rds_tcp_conn_list, &tmp_list);
        INIT_LIST_HEAD(&rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
        .laddr_check            = rds_tcp_laddr_check,
        .xmit_prepare           = rds_tcp_xmit_prepare,
        .xmit_complete          = rds_tcp_xmit_complete,
        .xmit                   = rds_tcp_xmit,
        .recv                   = rds_tcp_recv,
        .conn_alloc             = rds_tcp_conn_alloc,
        .conn_free              = rds_tcp_conn_free,
        .conn_connect           = rds_tcp_conn_connect,
        .conn_shutdown          = rds_tcp_conn_shutdown,
        .inc_copy_to_user       = rds_tcp_inc_copy_to_user,
        .inc_free               = rds_tcp_inc_free,
        .stats_info_copy        = rds_tcp_stats_info_copy,
        .exit                   = rds_tcp_exit,
        .t_owner                = THIS_MODULE,
        .t_name                 = "tcp",
        .t_type                 = RDS_TRANS_TCP,
        .t_prefer_loopback      = 1,
};

static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};
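
/* rds_tcp_net holds this module's per-netns state: the listen socket and its
 * accept work item, plus the (possibly kmemdup'd) sysctl table and the buffer
 * sizes that table exposes.  The pernet core allocates one instance per
 * namespace (see rds_tcp_net_ops below) and it is fetched everywhere with
 * net_generic(net, rds_tcp_netid).
 */
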
/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_nonagle(sock);
        lock_sock(sk);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
}
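
/* Setting SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK in sk_userlocks pins the buffer
 * sizes chosen above, so TCP autotuning will not grow or shrink them.  When
 * the sysctls are left at their default of 0, the locks stay clear and the
 * stack picks and tunes the sizes itself.
 */
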
static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}
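
/* rds_tcp_accept_work() is intended to be called from the listen socket's
 * data_ready callback (see tcp_listen.c).  It only queues rds_tcp_accept_w,
 * so the actual accept loop in rds_tcp_accept_worker() runs from rds_wq in
 * process context rather than in the softirq that signalled the connection.
 */
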
static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd, rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up listen sock\n");
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                rtn->rds_tcp_sysctl = NULL;
                err = -EAFNOSUPPORT;
                goto fail;
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net && rtn->ctl_table)
                kfree(rtn->ctl_table);

        /* If rds_tcp_exit_net() is called as a result of netns deletion,
         * the rds_tcp_kill_sock() device notifier would already have cleaned
         * up the listen socket, thus there is no work to do in this function.
         *
         * If rds_tcp_exit_net() is called as a result of module unload,
         * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
         * we do need to clean up the listen socket here.
         */
        if (rtn->rds_tcp_listen_sock) {
                rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
                rtn->rds_tcp_listen_sock = NULL;
                flush_work(&rtn->rds_tcp_accept_w);
        }
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};
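
/* rds_tcp_init() registers these pernet operations.  From then on .init and
 * .exit run for every network namespace as it is created or torn down, and
 * .id/.size make the pernet core allocate one struct rds_tcp_net per
 * namespace for net_generic() lookups.
 */
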
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        struct sock *sk;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
        rtn->rds_tcp_listen_sock = NULL;
        flush_work(&rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;
                list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                sk = tc->t_sock->sk;
                sk->sk_prot->disconnect(sk, 0);
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}

static int rds_tcp_dev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* rds-tcp registers as a pernet subsys, so the ->exit will only
         * get invoked after network activity has quiesced. We need to
         * clean up all sockets to quiesce network activity, and use
         * the unregistration of the per-net loopback device as a trigger
         * to start that cleanup.
         */
        if (event == NETDEV_UNREGISTER_FINAL &&
            dev->ifindex == LOOPBACK_IFINDEX)
                rds_tcp_kill_sock(dev_net(dev));

        return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
        .notifier_call = rds_tcp_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters.  The assumption is that such reset
 * events are few and far between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                /* reconnect with new parameters */
                rds_conn_drop(tc->conn);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos)
{
        struct net *net = current->nsproxy->net_ns;
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }

        if (write)
                rds_tcp_sysctl_reset(net);
        return 0;
}
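
/* With the table registered under "net/rds/tcp", these knobs surface as e.g.
 * /proc/sys/net/rds/tcp/rds_tcp_sndbuf (sysctl name net.rds.tcp.rds_tcp_sndbuf).
 * A write of at least SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF is accepted and
 * immediately drops that netns's connections, so they reconnect with the new
 * buffer sizes applied by rds_tcp_tune().
 */
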
static void rds_tcp_exit(void)
{
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
        unregister_pernet_subsys(&rds_tcp_net_ops);
        if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
                pr_warn("could not unregister rds_tcp_dev_notifier\n");
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
                                              sizeof(struct rds_tcp_connection),
                                              0, 0, NULL);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
        if (ret) {
                pr_warn("could not register rds_tcp_dev_notifier\n");
                goto out;
        }

        ret = register_pernet_subsys(&rds_tcp_net_ops);
        if (ret)
                goto out_slab;

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_pernet;

        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

        goto out;

out_recv:
        rds_tcp_recv_exit();
out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");