#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))

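/*
 * For illustration (derived directly from the macros above): the version
 * is an 8.8 encoding, so
 *
 *	RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1
 *	RDS_PROTOCOL_MAJOR(RDS_PROTOCOL_3_1) == 3
 *	RDS_PROTOCOL_MINOR(RDS_PROTOCOL_3_1) == 1
 */
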
/*
 * XXX randomly chosen, but at least seems to be unused:
 * #		18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

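/*
 * For example, ceil() is round-up integer division:
 * ceil(5, 4) == 2, ceil(8, 4) == 2, ceil(9, 4) == 3.
 */
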
#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

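/*
 * A sketch of how a port maps into the bitmap (this mirrors the
 * arithmetic used in cong.c; the local names are only illustrative):
 *
 *	i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	set_bit_le(off, (void *)map->m_page_addrs[i]);
 *
 * With 4K pages, all 65536 port bits fit in RDS_CONG_MAP_PAGES == 2 pages.
 */
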
struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};

/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n)	(jhash_1word((rs)->rs_bound_port, \
					(rs)->rs_hash_initval) & ((n) - 1))

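/*
 * Sketch of intended use (assuming c_npaths has already been negotiated
 * with the peer over the handshake probe):
 *
 *	int hash = 0;
 *
 *	if (conn->c_npaths > 1)
 *		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
 *	cp = &conn->c_path[hash];
 */
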
/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_outgoing:1,
				cp_pad_to_32:31;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq;	/* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the only control information exchanged is the number of
 * supported paths. A legacy (older kernel revision) peer returns a pong
 * message without the additional control information, which alerts the
 * sender that the peer is an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
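
/*
 * For example, a probe ping is sent from sport == RDS_FLAG_PROBE_PORT
 * to dport == 0, and the pong comes back with the ports swapped, so
 * RDS_HS_PROBE() is non-zero in both directions:
 *
 *	RDS_HS_PROBE(RDS_FLAG_PROBE_PORT, 0)	-> handshake probe
 *	RDS_HS_PROBE(0, RDS_FLAG_PROBE_PORT)	-> handshake probe
 */
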
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}

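/*
 * The cookie packs the r_key into the low 32 bits and the offset into
 * the high 32 bits, so encode and decode round-trip:
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 8192);
 *
 *	rds_rdma_cookie_key(c) == 0x1234, rds_rdma_cookie_offset(c) == 8192
 */
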
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP	0
#define RDS_ATOMIC_TYPE_FADD	1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with the send
 *                 and connecting paths (xmit_* and conn_*).  The transport is
 *                 responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *cp);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

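/*
 * E.g. a setsockopt(SO_SNDBUF) of 64KB makes the core set sk_sndbuf to
 * 128KB; rds_sk_sndbuf() then reports the 64KB of payload the caller
 * actually asked for.
 */
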
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;

/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

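/*
 * cp_state moves through the states enumerated above via atomic
 * compare-and-swap; for example, the connect worker claims a path with:
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		... this caller owns the connect attempt ...
 *
 * A failed cmpxchg means some other context changed the state first.
 */
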
static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}

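/*
 * Sketch of intended use: the sender fills in h_csum over the whole
 * header before transmit and the receiver verifies it, with h_csum == 0
 * meaning "no checksum supplied":
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);
 *	...
 *	if (!rds_message_verify_checksum(&inc->i_hdr))
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 */
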
/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *cp);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += (count);	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
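
/*
 * Typical usage from the core or a transport, e.g.:
 *
 *	rds_stats_inc(s_send_pong);
 *	rds_stats_add(s_copy_from_user, nbytes);
 *
 * The get_cpu()/put_cpu() pair makes the per-CPU update preemption-safe.
 */
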
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *cp, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif