/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))

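/*
 * Worked example (editor's illustration): RDS_PROTOCOL(3, 1) == 0x0301,
 * so RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1,
 * round-tripping RDS_PROTOCOL_VERSION exactly.
 */
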
/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

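/*
 * Usage sketch (illustrative only): rdsdebug() takes printf-style
 * arguments and prefixes the calling function's name, e.g.
 *
 *	rdsdebug("conn %p state %d\n", conn, atomic_read(&cp->cp_state));
 */
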
/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

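/*
 * Worked example (editor's illustration): with RDS_FRAG_SHIFT == 12 each
 * fragment is 4 KiB, so a maximal message needs
 *
 *	ceil(RDS_MAX_MSG_SIZE, RDS_FRAG_SIZE) == (1 << 20) / (1 << 12) == 256
 *
 * fragments.
 */
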
#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

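/*
 * Worked example (editor's illustration, assuming PAGE_SIZE == 4096): the
 * map carries one bit per 16-bit port, i.e. 65536 / 8 == 8192 bytes, which
 * spans RDS_CONG_MAP_PAGES == 2 pages of RDS_CONG_MAP_PAGE_BITS == 32768
 * port bits each.
 */
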
struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n)	(jhash_1word((rs)->rs_bound_port, \
				 (rs)->rs_hash_initval) & ((n) - 1))

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

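/*
 * Usage sketch (illustrative only): a bound socket hashes onto one of the
 * connection's paths, and IS_CANONICAL() gives both endpoints the same view
 * of which side "owns" the address pair:
 *
 *	int idx = RDS_MPATH_HASH(rs, conn->c_npaths);
 *	bool canonical = IS_CANONICAL(conn->c_laddr, conn->c_faddr);
 */
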
/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_destroy_in_prog:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq;	/* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

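/*
 * Worked example (editor's illustration): the fixed wire header is
 * 8 + 8 + 4 + 2 + 2 + 1 + 1 + 4 + 2 + 16 == 48 bytes, so
 * sizeof(struct rds_header) >> 2 (as passed to ip_fast_csum() further
 * down) is 12 32-bit words.
 */
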
/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR		0
#define RDS_MSG_RX_START	1
#define RDS_MSG_RX_END		2
#define RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

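/*
 * Usage sketch (illustrative only): keeping RDS_MR_DEAD in r_state rather
 * than in the bit field above allows an atomic test-and-set, so only the
 * caller that flips the bit first proceeds to tear the MR down:
 *
 *	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
 *		return;
 */
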
static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}

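/*
 * Worked example (editor's illustration): the cookie packs the R_Key into
 * the low 32 bits and the offset into the high 32 bits, so the helpers
 * round-trip both halves:
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 0x100);
 *	rds_rdma_cookie_key(c) == 0x1234, rds_rdma_cookie_offset(c) == 0x100.
 */
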
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define RDS_TRANS_LOOP	3

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, the transport must not deliver any more traffic
 *                 for the connection via rds_recv_incoming().  This will only
 *                 be called once after conn_connect returns non-zero success.
 *                 The caller serializes this with the send and connecting
 *                 paths (xmit_* and conn_*).  The transport is responsible
 *                 for other serialization, including rds_recv_incoming().
 *                 This is called in process context but should try hard not
 *                 to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};

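/*
 * Registration sketch (editor's illustration, not a real transport): a
 * transport fills in its hooks and registers itself with the core.  The
 * rds_foo_* names below are hypothetical placeholders; see loop.c and the
 * IB/TCP transports for real implementations.
 *
 *	static struct rds_transport rds_foo_transport = {
 *		.t_name			= "foo",
 *		.t_owner		= THIS_MODULE,
 *		.laddr_check		= rds_foo_laddr_check,
 *		.conn_alloc		= rds_foo_conn_alloc,
 *		.conn_free		= rds_foo_conn_free,
 *		.conn_path_connect	= rds_foo_conn_path_connect,
 *		.conn_path_shutdown	= rds_foo_conn_path_shutdown,
 *		.xmit			= rds_foo_xmit,
 *		.recv_path		= rds_foo_recv_path,
 *		.inc_copy_to_user	= rds_foo_inc_copy_to_user,
 *		.inc_free		= rds_foo_inc_free,
 *	};
 *
 *	rds_trans_register(&rds_foo_transport);
 */
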
struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

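/*
 * Worked example (editor's illustration): setsockopt(SO_SNDBUF, 1 << 20)
 * makes the core networking stack store sk_sndbuf == 2 << 20, so
 * rds_sk_sndbuf() halves it back to the 1 MiB of payload bytes the caller
 * actually asked for.
 */
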
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

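/*
 * Usage sketch (illustrative only): state changes go through an atomic
 * compare-and-swap, so when two paths race only one wins the transition.
 * A connect worker would typically bail out if it loses:
 *
 *	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		return;
 */
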
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}


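/*
 * Note (editor's illustration): h_csum is computed with the checksum field
 * zeroed, so summing the finished header again yields 0; that is what
 * rds_message_verify_checksum() relies on, and a wire value of 0 is treated
 * as "no checksum", e.g.
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);
 *	WARN_ON(!rds_message_verify_checksum(&rm->m_inc.i_hdr));
 */
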
/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
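
/*
 * Usage sketch (illustrative only): counters live in per-CPU copies of
 * struct rds_statistics; get_cpu()/put_cpu() pin the task while it touches
 * the local copy, e.g.
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_copy_from_user, ret);
 */
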
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif