/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
#include "rds_single_path.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	atomic_inc(&inc->i_refcount);
}
void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	if (atomic_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;
	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
	  "now_cong %d delta %d\n",
	  rs, &rs->rs_bound_addr,
	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
	  rds_sk_rcvbuf(rs), now_congested, delta);

	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}
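/*
 * Illustrative, compiled-out sketch: the congestion accounting above is a
 * Schmitt trigger.  A socket reports congested once queued bytes exceed
 * its rcvbuf, and reports uncongested only after they fall below half of
 * it, so the port's congestion bit doesn't bounce on every message.  The
 * helper below is a hypothetical model of that rule, not kernel code.
 */
#if 0
static int cong_hysteresis_model(int was_congested, int rcv_bytes, int rcvbuf)
{
	if (!was_congested && rcv_bytes > rcvbuf)
		return 1;		/* wasn't -> am congested */
	if (was_congested && rcv_bytes < rcvbuf / 2)
		return 0;		/* was -> aren't congested */
	return was_congested;		/* no change in cong state */
}
#endif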
/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}
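/*
 * Minimal compiled-out sketch of the cookie packing assumed above:
 * rds_rdma_make_cookie() keeps the R_Key in the low 32 bits of the
 * 64-bit cookie and the byte offset in the high 32 bits.  Reproduced
 * here purely for illustration.
 */
#if 0
static inline u64 example_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (u64)offset << 32;	/* r_key low, offset high */
}
#endif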
/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, which only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)conn->c_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);
	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		rds_stats_inc(s_recv_ping);
		rds_send_pong(conn, inc->i_hdr.h_sport);
		goto out;
	}

	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}
	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
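/*
 * Compiled-out model of the sequence handling above: a fragment carrying
 * an old sequence number is dropped only when it is flagged as a
 * retransmission; otherwise the receive pointer simply advances, skipping
 * holes left by senders that closed before their messages hit the wire.
 * Hypothetical helper for illustration only.
 */
#if 0
static int seq_model_accept(u64 h_sequence, int retransmitted, u64 *next_rx_seq)
{
	if (h_sequence < *next_rx_seq && retransmitted)
		return 0;		/* drop old resent fragment */
	*next_rx_seq = h_sequence + 1;
	return 1;
}
#endif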
/*
 * Be very careful here.  This is being called as the condition in
 * wait_event_*() and needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}
/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can stuff
	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
	 * losing notifications - except when the buffer is so small that it wouldn't
	 * even hold a single notification. Then we give the caller as much of this
	 * single msg as we can squeeze in, and set MSG_CTRUNC.
	 */
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}

	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;

	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}

	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}
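/*
 * Hedged, compiled-out userspace sketch: one plausible way for an
 * application to drain the RDS_CMSG_RDMA_STATUS notifications emitted by
 * rds_notify_queue_get().  "fd" is a hypothetical bound RDS socket;
 * setup and error handling are omitted.
 */
#if 0
#include <sys/socket.h>
#include <linux/rds.h>
#include <string.h>

#ifndef SOL_RDS
#define SOL_RDS 276			/* matches include/linux/socket.h */
#endif

static void drain_rdma_notifications(int fd)
{
	char ctl[1024];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = ctl;
	msg.msg_controllen = sizeof(ctl);

	if (recvmsg(fd, &msg, MSG_DONTWAIT) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct rds_rdma_notify notify;

		if (cmsg->cmsg_level != SOL_RDS ||
		    cmsg->cmsg_type != RDS_CMSG_RDMA_STATUS)
			continue;
		memcpy(&notify, CMSG_DATA(cmsg), sizeof(notify));
		/* notify.user_token names the RDMA op the caller posted;
		 * notify.status is its completion status. */
	}
}
#endif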
/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
			sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}
/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			return ret;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			return ret;
	}

	return 0;
}
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;
	while (1) {
		struct iov_iter save;
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}
		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		save = msg->msg_iter;
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;

		/*
		 * If the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			msg->msg_iter = save;
			continue;
		}

		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);

out:
	return ret;
}
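/*
 * Hedged, compiled-out userspace sketch: how a caller might consume
 * rds_recvmsg() output, including the sender address the kernel fills
 * into msg_name above.  "fd" is a hypothetical bound RDS socket.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static ssize_t rds_recv_one(int fd, void *buf, size_t len,
			    struct sockaddr_in *from)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = from;		/* filled with the sender's addr/port */
	msg.msg_namelen = sizeof(*from);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	/* If buf is too short for the datagram, MSG_TRUNC is set in
	 * msg.msg_flags, mirroring the h_len check in rds_recvmsg(). */
	return recvmsg(fd, &msg, 0);
}
#endif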
/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}