net/rds/rds.h
1 #ifndef _RDS_RDS_H
2 #define _RDS_RDS_H
3
4 #include <net/sock.h>
5 #include <linux/scatterlist.h>
6 #include <linux/highmem.h>
7 #include <rdma/rdma_cm.h>
8 #include <linux/mutex.h>
9 #include <linux/rds.h>
10 #include <linux/rhashtable.h>
11
12 #include "info.h"
13
14 /*
15 * RDS Network protocol version
16 */
17 #define RDS_PROTOCOL_3_0 0x0300
18 #define RDS_PROTOCOL_3_1 0x0301
19 #define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
20 #define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
21 #define RDS_PROTOCOL_MINOR(v) ((v) & 255)
22 #define RDS_PROTOCOL(maj, min) (((maj) << 8) | (min))
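/*
 * Worked example of the packing above: RDS_PROTOCOL(3, 1) == 0x0301,
 * RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1.
 */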
23
24 /*
25 * XXX randomly chosen, but at least seems to be unused:
26 * # 18464-18768 Unassigned
27 * We should do better. We want a reserved port to discourage unpriv'ed
28 * userspace from listening.
29 */
30 #define RDS_PORT 18634
31
32 #ifdef ATOMIC64_INIT
33 #define KERNEL_HAS_ATOMIC64
34 #endif
35
36 #ifdef DEBUG
37 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
38 #else
39 /* sigh, pr_debug() causes unused variable warnings */
40 static inline __printf(1, 2)
41 void rdsdebug(char *fmt, ...)
42 {
43 }
44 #endif
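/*
 * Illustrative use of rdsdebug(), in the style of callers elsewhere in RDS
 * (the connection variable here is just an example):
 *
 *	rdsdebug("conn %p for %pI4 -> %pI4\n", conn, &conn->c_laddr, &conn->c_faddr);
 *
 * With DEBUG defined this expands to pr_debug() prefixed with the calling
 * function's name; without it the empty inline still gives printf-style
 * argument checking via __printf(1, 2) but emits nothing.
 */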
45
46 /* XXX is there one of these somewhere? */
47 #define ceil(x, y) \
48 ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
49
50 #define RDS_FRAG_SHIFT 12
51 #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
52
53 #define RDS_CONG_MAP_BYTES (65536 / 8)
54 #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
55 #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
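/*
 * Worked sizing, assuming 4 KB pages: one bit per port gives
 * 65536 / 8 = 8192 bytes, so RDS_CONG_MAP_PAGES == 2 and each page covers
 * RDS_CONG_MAP_PAGE_BITS == 32768 ports.
 */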
56
57 struct rds_cong_map {
58 struct rb_node m_rb_node;
59 __be32 m_addr;
60 wait_queue_head_t m_waitq;
61 struct list_head m_conn_list;
62 unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
63 };
64
65
66 /*
67 * This is how we will track the connection state:
68 * A connection is always in one of the following
69 * states. Updates to the state are atomic and imply
70 * a memory barrier.
71 */
72 enum {
73 RDS_CONN_DOWN = 0,
74 RDS_CONN_CONNECTING,
75 RDS_CONN_DISCONNECTING,
76 RDS_CONN_UP,
77 RDS_CONN_RESETTING,
78 RDS_CONN_ERROR,
79 };
80
81 /* Bits for c_flags */
82 #define RDS_LL_SEND_FULL 0
83 #define RDS_RECONNECT_PENDING 1
84 #define RDS_IN_XMIT 2
85 #define RDS_RECV_REFILL 3
86
87 /* Max number of multipaths per RDS connection. Must be a power of 2 */
88 #define RDS_MPATH_WORKERS 1
89
90 /* Per mpath connection state */
91 struct rds_conn_path {
92 struct rds_connection *cp_conn;
93 struct rds_message *cp_xmit_rm;
94 unsigned long cp_xmit_sg;
95 unsigned int cp_xmit_hdr_off;
96 unsigned int cp_xmit_data_off;
97 unsigned int cp_xmit_atomic_sent;
98 unsigned int cp_xmit_rdma_sent;
99 unsigned int cp_xmit_data_sent;
100
101 spinlock_t cp_lock; /* protect msg queues */
102 u64 cp_next_tx_seq;
103 struct list_head cp_send_queue;
104 struct list_head cp_retrans;
105
106 u64 cp_next_rx_seq;
107
108 void *cp_transport_data;
109
110 atomic_t cp_state;
111 unsigned long cp_send_gen;
112 unsigned long cp_flags;
113 unsigned long cp_reconnect_jiffies;
114 struct delayed_work cp_send_w;
115 struct delayed_work cp_recv_w;
116 struct delayed_work cp_conn_w;
117 struct work_struct cp_down_w;
118 struct mutex cp_cm_lock; /* protect cp_state & cm */
119 wait_queue_head_t cp_waitq;
120
121 unsigned int cp_unacked_packets;
122 unsigned int cp_unacked_bytes;
123 unsigned int cp_outgoing:1,
124 cp_pad_to_32:31;
125 unsigned int cp_index;
126 };
127
128 /* One rds_connection per RDS address pair */
129 struct rds_connection {
130 struct hlist_node c_hash_node;
131 __be32 c_laddr;
132 __be32 c_faddr;
133 unsigned int c_loopback:1,
134 c_pad_to_32:31;
135 int c_npaths;
136 struct rds_connection *c_passive;
137 struct rds_transport *c_trans;
138
139 struct rds_cong_map *c_lcong;
140 struct rds_cong_map *c_fcong;
141
142 /* Protocol version */
143 unsigned int c_version;
144 possible_net_t c_net;
145
146 struct list_head c_map_item;
147 unsigned long c_map_queued;
148
149 struct rds_conn_path c_path[RDS_MPATH_WORKERS];
150 };
151
152 static inline
153 struct net *rds_conn_net(struct rds_connection *conn)
154 {
155 return read_pnet(&conn->c_net);
156 }
157
158 static inline
159 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
160 {
161 write_pnet(&conn->c_net, net);
162 }
163
164 #define RDS_FLAG_CONG_BITMAP 0x01
165 #define RDS_FLAG_ACK_REQUIRED 0x02
166 #define RDS_FLAG_RETRANSMITTED 0x04
167 #define RDS_MAX_ADV_CREDIT 255
168
169 /*
170 * Maximum space available for extension headers.
171 */
172 #define RDS_HEADER_EXT_SPACE 16
173
174 struct rds_header {
175 __be64 h_sequence;
176 __be64 h_ack;
177 __be32 h_len;
178 __be16 h_sport;
179 __be16 h_dport;
180 u8 h_flags;
181 u8 h_credit;
182 u8 h_padding[4];
183 __sum16 h_csum;
184
185 u8 h_exthdr[RDS_HEADER_EXT_SPACE];
186 };
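/*
 * The header above is a fixed 48 bytes on the wire with no hidden padding:
 * 8 + 8 (sequence, ack) + 4 (len) + 2 + 2 (ports) + 1 + 1 (flags, credit)
 * + 4 (padding) + 2 (csum) + 16 (extension-header space).
 */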
187
188 /*
189 * Reserved - indicates end of extensions
190 */
191 #define RDS_EXTHDR_NONE 0
192
193 /*
194 * This extension header is included in the very
195 * first message that is sent on a new connection,
196 * and identifies the protocol level. This will help
197 * rolling updates if a future change requires breaking
198 * the protocol.
199 * NB: This is no longer true for IB, where we do a version
200 * negotiation during the connection setup phase (protocol
201 * version information is included in the RDMA CM private data).
202 */
203 #define RDS_EXTHDR_VERSION 1
204 struct rds_ext_header_version {
205 __be32 h_version;
206 };
207
208 /*
209 * This extension header is included in the RDS message
210 * chasing an RDMA operation.
211 */
212 #define RDS_EXTHDR_RDMA 2
213 struct rds_ext_header_rdma {
214 __be32 h_rdma_rkey;
215 };
216
217 /*
218 * This extension header tells the peer about the
219 * destination <R_Key,offset> of the requested RDMA
220 * operation.
221 */
222 #define RDS_EXTHDR_RDMA_DEST 3
223 struct rds_ext_header_rdma_dest {
224 __be32 h_rdma_rkey;
225 __be32 h_rdma_offset;
226 };
227
228 #define __RDS_EXTHDR_MAX 16 /* for now */
229
230 struct rds_incoming {
231 atomic_t i_refcount;
232 struct list_head i_item;
233 struct rds_connection *i_conn;
234 struct rds_header i_hdr;
235 unsigned long i_rx_jiffies;
236 __be32 i_saddr;
237
238 rds_rdma_cookie_t i_rdma_cookie;
239 struct timeval i_rx_tstamp;
240 };
241
242 struct rds_mr {
243 struct rb_node r_rb_node;
244 atomic_t r_refcount;
245 u32 r_key;
246
247 /* A copy of the creation flags */
248 unsigned int r_use_once:1;
249 unsigned int r_invalidate:1;
250 unsigned int r_write:1;
251
252 /* This is for RDS_MR_DEAD.
253 * It would be nice & consistent to make this part of the above
254 * bit field here, but we need to use test_and_set_bit.
255 */
256 unsigned long r_state;
257 struct rds_sock *r_sock; /* back pointer to the socket that owns us */
258 struct rds_transport *r_trans;
259 void *r_trans_private;
260 };
261
262 /* Flags for mr->r_state */
263 #define RDS_MR_DEAD 0
264
265 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
266 {
267 return r_key | (((u64) offset) << 32);
268 }
269
270 static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
271 {
272 return cookie;
273 }
274
275 static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
276 {
277 return cookie >> 32;
278 }
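/*
 * Round trip through the helpers above (illustrative): the R_Key occupies
 * the low 32 bits of the cookie, the offset the high 32 bits.
 *
 *	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(mr->r_key, 4096);
 *
 *	rds_rdma_cookie_key(cookie);     returns mr->r_key
 *	rds_rdma_cookie_offset(cookie);  returns 4096
 */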
279
280 /* atomic operation types */
281 #define RDS_ATOMIC_TYPE_CSWP 0
282 #define RDS_ATOMIC_TYPE_FADD 1
283
284 /*
285 * m_sock_item and m_conn_item are on lists that are serialized under
286 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
287 * the message will not be put back on the retransmit list after being sent.
288 * Messages that are canceled while being sent rely on this.
289 *
290 * m_inc is used by loopback so that it can pass an incoming message straight
291 * back up into the rx path. It embeds a wire header which is also used by
292 * the send path, which is kind of awkward.
293 *
294 * m_sock_item indicates the message's presence on a socket's send or receive
295 * queue. m_rs will point to that socket.
296 *
297 * m_daddr is used by cancellation to prune messages to a given destination.
298 *
299 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
300 * nesting. As paths iterate over messages on a sock, or conn, they must
301 * also lock the conn, or sock, to remove the message from those lists too.
302 * Testing the flag to determine if the message is still on the lists lets
303 * us avoid testing the list_head directly. That means each path can use
304 * the message's list_head to keep it on a local list while juggling locks
305 * without confusing the other path.
306 *
307 * m_ack_seq is an optional field set by transports that need a different
308 * sequence number range to invalidate. They can use this in a callback
309 * that they pass to rds_send_drop_acked() to see if each message has been
310 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
311 * had ack_seq set yet.
312 */
313 #define RDS_MSG_ON_SOCK 1
314 #define RDS_MSG_ON_CONN 2
315 #define RDS_MSG_HAS_ACK_SEQ 3
316 #define RDS_MSG_ACK_REQUIRED 4
317 #define RDS_MSG_RETRANSMITTED 5
318 #define RDS_MSG_MAPPED 6
319 #define RDS_MSG_PAGEVEC 7
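/*
 * A minimal sketch of the lock-avoidance pattern described above (local_list
 * is a hypothetical per-path list, not a field of any RDS structure):
 *
 *	if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 *		list_move(&rm->m_conn_item, &local_list);
 *
 * Testing the flag says whether the message is still on the conn's list, so
 * the other path never has to peek at a list_head it does not own.
 */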
320
321 struct rds_message {
322 atomic_t m_refcount;
323 struct list_head m_sock_item;
324 struct list_head m_conn_item;
325 struct rds_incoming m_inc;
326 u64 m_ack_seq;
327 __be32 m_daddr;
328 unsigned long m_flags;
329
330 /* Never access m_rs without holding m_rs_lock.
331 * Lock nesting is
332 * rm->m_rs_lock
333 * -> rs->rs_lock
334 */
335 spinlock_t m_rs_lock;
336 wait_queue_head_t m_flush_wait;
337
338 struct rds_sock *m_rs;
339
340 /* cookie to send to remote, in rds header */
341 rds_rdma_cookie_t m_rdma_cookie;
342
343 unsigned int m_used_sgs;
344 unsigned int m_total_sgs;
345
346 void *m_final_op;
347
348 struct {
349 struct rm_atomic_op {
350 int op_type;
351 union {
352 struct {
353 uint64_t compare;
354 uint64_t swap;
355 uint64_t compare_mask;
356 uint64_t swap_mask;
357 } op_m_cswp;
358 struct {
359 uint64_t add;
360 uint64_t nocarry_mask;
361 } op_m_fadd;
362 };
363
364 u32 op_rkey;
365 u64 op_remote_addr;
366 unsigned int op_notify:1;
367 unsigned int op_recverr:1;
368 unsigned int op_mapped:1;
369 unsigned int op_silent:1;
370 unsigned int op_active:1;
371 struct scatterlist *op_sg;
372 struct rds_notifier *op_notifier;
373
374 struct rds_mr *op_rdma_mr;
375 } atomic;
376 struct rm_rdma_op {
377 u32 op_rkey;
378 u64 op_remote_addr;
379 unsigned int op_write:1;
380 unsigned int op_fence:1;
381 unsigned int op_notify:1;
382 unsigned int op_recverr:1;
383 unsigned int op_mapped:1;
384 unsigned int op_silent:1;
385 unsigned int op_active:1;
386 unsigned int op_bytes;
387 unsigned int op_nents;
388 unsigned int op_count;
389 struct scatterlist *op_sg;
390 struct rds_notifier *op_notifier;
391
392 struct rds_mr *op_rdma_mr;
393 } rdma;
394 struct rm_data_op {
395 unsigned int op_active:1;
396 unsigned int op_nents;
397 unsigned int op_count;
398 unsigned int op_dmasg;
399 unsigned int op_dmaoff;
400 struct scatterlist *op_sg;
401 } data;
402 };
403 };
404
405 /*
406 * The RDS notifier is used (optionally) to tell the application about
407 * completed RDMA operations. Rather than keeping the whole rds message
408 * around on the queue, we allocate a small notifier that is put on the
409 * socket's notifier_list. Notifications are delivered to the application
410 * through control messages.
411 */
412 struct rds_notifier {
413 struct list_head n_list;
414 uint64_t n_user_token;
415 int n_status;
416 };
417
418 /**
419 * struct rds_transport - transport specific behavioural hooks
420 *
421 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
422 * part of a message. The caller serializes on the send_sem so this
423 * doesn't need to be reentrant for a given conn. The header must be
424 * sent before the data payload. .xmit must be prepared to send a
425 * message with no data payload. .xmit should return the number of
426 * bytes that were sent down the connection, including header bytes.
427 * Returning 0 tells the caller that it doesn't need to perform any
428 * additional work now. This is usually the case when the transport has
429 * filled the sending queue for its connection and will handle
430 * triggering the rds thread to continue the send when space becomes
431 * available. Returning -EAGAIN tells the caller to retry the send
432 * immediately. Returning -ENOMEM tells the caller to retry the send at
433 * some point in the future.
434 *
435 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
436 * it returns, the connection can no longer call rds_recv_incoming().
437 * This will only be called once after conn_connect returns
438 * non-zero success. The caller serializes this with
439 * the send and connecting paths (xmit_* and conn_*). The
440 * transport is responsible for other serialization, including
441 * rds_recv_incoming(). This is called in process context but
442 * should try hard not to block.
443 */
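/*
 * A minimal sketch of how a caller such as rds_send_xmit() can act on the
 * .xmit return values documented above (illustrative only, not the real loop):
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret == 0)
 *		break;          the transport will reschedule the send itself
 *	if (ret == -EAGAIN)
 *		continue;       retry this message immediately
 *	if (ret == -ENOMEM)
 *		stop and reschedule the send worker for later
 *	a positive ret is the number of header plus data bytes consumed
 */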
444
445 struct rds_transport {
446 char t_name[TRANSNAMSIZ];
447 struct list_head t_item;
448 struct module *t_owner;
449 unsigned int t_prefer_loopback:1;
450 unsigned int t_type;
451
452 int (*laddr_check)(struct net *net, __be32 addr);
453 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
454 void (*conn_free)(void *data);
455 int (*conn_connect)(struct rds_connection *conn);
456 void (*conn_shutdown)(struct rds_connection *conn);
457 void (*xmit_prepare)(struct rds_connection *conn);
458 void (*xmit_complete)(struct rds_connection *conn);
459 int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
460 unsigned int hdr_off, unsigned int sg, unsigned int off);
461 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
462 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
463 int (*recv)(struct rds_connection *conn);
464 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
465 void (*inc_free)(struct rds_incoming *inc);
466
467 int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
468 struct rdma_cm_event *event);
469 int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
470 void (*cm_connect_complete)(struct rds_connection *conn,
471 struct rdma_cm_event *event);
472
473 unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
474 unsigned int avail);
475 void (*exit)(void);
476 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
477 struct rds_sock *rs, u32 *key_ret);
478 void (*sync_mr)(void *trans_private, int direction);
479 void (*free_mr)(void *trans_private, int invalidate);
480 void (*flush_mrs)(void);
481 };
482
483 struct rds_sock {
484 struct sock rs_sk;
485
486 u64 rs_user_addr;
487 u64 rs_user_bytes;
488
489 /*
490 * bound_addr used for both incoming and outgoing, no INADDR_ANY
491 * support.
492 */
493 struct rhash_head rs_bound_node;
494 u64 rs_bound_key;
495 __be32 rs_bound_addr;
496 __be32 rs_conn_addr;
497 __be16 rs_bound_port;
498 __be16 rs_conn_port;
499 struct rds_transport *rs_transport;
500
501 /*
502 * rds_sendmsg caches the conn it used the last time around.
503 * This helps avoid costly lookups.
504 */
505 struct rds_connection *rs_conn;
506
507 /* flag indicating we were congested or not */
508 int rs_congested;
509 /* seen congestion (ENOBUFS) when sending? */
510 int rs_seen_congestion;
511
512 /* rs_lock protects all these adjacent members before the newline */
513 spinlock_t rs_lock;
514 struct list_head rs_send_queue;
515 u32 rs_snd_bytes;
516 int rs_rcv_bytes;
517 struct list_head rs_notify_queue; /* currently used for failed RDMAs */
518
519 /* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
520 * to decide whether the application should be woken up.
521 * If not set, we use rs_cong_track to find out whether a cong map
522 * update arrived.
523 */
524 uint64_t rs_cong_mask;
525 uint64_t rs_cong_notify;
526 struct list_head rs_cong_list;
527 unsigned long rs_cong_track;
528
529 /*
530 * rs_recv_lock protects the receive queue, and is
531 * used to serialize with rds_release.
532 */
533 rwlock_t rs_recv_lock;
534 struct list_head rs_recv_queue;
535
536 /* just for stats reporting */
537 struct list_head rs_item;
538
539 /* these have their own lock */
540 spinlock_t rs_rdma_lock;
541 struct rb_root rs_rdma_keys;
542
543 /* Socket options - in case there will be more */
544 unsigned char rs_recverr,
545 rs_cong_monitor;
546 };
547
548 static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
549 {
550 return container_of(sk, struct rds_sock, rs_sk);
551 }
552 static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
553 {
554 return &rs->rs_sk;
555 }
556
557 /*
558 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
559 * to account for overhead. We don't account for overhead, we just apply
560 * the number of payload bytes to the specified value.
561 */
562 static inline int rds_sk_sndbuf(struct rds_sock *rs)
563 {
564 return rds_rs_to_sk(rs)->sk_sndbuf / 2;
565 }
566 static inline int rds_sk_rcvbuf(struct rds_sock *rs)
567 {
568 return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
569 }
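/*
 * Worked example of the halving above: setting SO_SNDBUF to 64 KB makes the
 * core set sk_sndbuf to 128 KB, so rds_sk_sndbuf() reports the 64 KB of
 * payload the application actually asked for.
 */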
570
571 struct rds_statistics {
572 uint64_t s_conn_reset;
573 uint64_t s_recv_drop_bad_checksum;
574 uint64_t s_recv_drop_old_seq;
575 uint64_t s_recv_drop_no_sock;
576 uint64_t s_recv_drop_dead_sock;
577 uint64_t s_recv_deliver_raced;
578 uint64_t s_recv_delivered;
579 uint64_t s_recv_queued;
580 uint64_t s_recv_immediate_retry;
581 uint64_t s_recv_delayed_retry;
582 uint64_t s_recv_ack_required;
583 uint64_t s_recv_rdma_bytes;
584 uint64_t s_recv_ping;
585 uint64_t s_send_queue_empty;
586 uint64_t s_send_queue_full;
587 uint64_t s_send_lock_contention;
588 uint64_t s_send_lock_queue_raced;
589 uint64_t s_send_immediate_retry;
590 uint64_t s_send_delayed_retry;
591 uint64_t s_send_drop_acked;
592 uint64_t s_send_ack_required;
593 uint64_t s_send_queued;
594 uint64_t s_send_rdma;
595 uint64_t s_send_rdma_bytes;
596 uint64_t s_send_pong;
597 uint64_t s_page_remainder_hit;
598 uint64_t s_page_remainder_miss;
599 uint64_t s_copy_to_user;
600 uint64_t s_copy_from_user;
601 uint64_t s_cong_update_queued;
602 uint64_t s_cong_update_received;
603 uint64_t s_cong_send_error;
604 uint64_t s_cong_send_blocked;
605 };
606
607 /* af_rds.c */
608 void rds_sock_addref(struct rds_sock *rs);
609 void rds_sock_put(struct rds_sock *rs);
610 void rds_wake_sk_sleep(struct rds_sock *rs);
611 static inline void __rds_wake_sk_sleep(struct sock *sk)
612 {
613 wait_queue_head_t *waitq = sk_sleep(sk);
614
615 if (!sock_flag(sk, SOCK_DEAD) && waitq)
616 wake_up(waitq);
617 }
618 extern wait_queue_head_t rds_poll_waitq;
619
620
621 /* bind.c */
622 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
623 void rds_remove_bound(struct rds_sock *rs);
624 struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
625 int rds_bind_lock_init(void);
626 void rds_bind_lock_destroy(void);
627
628 /* cong.c */
629 int rds_cong_get_maps(struct rds_connection *conn);
630 void rds_cong_add_conn(struct rds_connection *conn);
631 void rds_cong_remove_conn(struct rds_connection *conn);
632 void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
633 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
634 int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
635 void rds_cong_queue_updates(struct rds_cong_map *map);
636 void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
637 int rds_cong_updated_since(unsigned long *recent);
638 void rds_cong_add_socket(struct rds_sock *);
639 void rds_cong_remove_socket(struct rds_sock *);
640 void rds_cong_exit(void);
641 struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
642
643 /* conn.c */
644 int rds_conn_init(void);
645 void rds_conn_exit(void);
646 struct rds_connection *rds_conn_create(struct net *net,
647 __be32 laddr, __be32 faddr,
648 struct rds_transport *trans, gfp_t gfp);
649 struct rds_connection *rds_conn_create_outgoing(struct net *net,
650 __be32 laddr, __be32 faddr,
651 struct rds_transport *trans, gfp_t gfp);
652 void rds_conn_shutdown(struct rds_connection *conn);
653 void rds_conn_destroy(struct rds_connection *conn);
654 void rds_conn_drop(struct rds_connection *conn);
655 void rds_conn_path_drop(struct rds_conn_path *cpath);
656 void rds_conn_connect_if_down(struct rds_connection *conn);
657 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
658 struct rds_info_iterator *iter,
659 struct rds_info_lengths *lens,
660 int (*visitor)(struct rds_connection *, void *),
661 size_t item_len);
662 __printf(2, 3)
663 void __rds_conn_error(struct rds_connection *conn, const char *, ...);
664 #define rds_conn_error(conn, fmt...) \
665 __rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
666
667 static inline int
668 rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
669 {
670 return atomic_cmpxchg(&cp->cp_state, old, new) == old;
671 }
672
673 static inline int
674 rds_conn_transition(struct rds_connection *conn, int old, int new)
675 {
676 return rds_conn_path_transition(&conn->c_path[0], old, new);
677 }
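/*
 * Illustrative use of the transition helpers above: a connect worker claims
 * a down path before dialing, so two workers can never both start connecting.
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
 *		ret = cp->cp_conn->c_trans->conn_connect(cp->cp_conn);
 *		...
 *	}
 */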
678
679 static inline int
680 rds_conn_path_state(struct rds_conn_path *cp)
681 {
682 return atomic_read(&cp->cp_state);
683 }
684
685 static inline int
686 rds_conn_state(struct rds_connection *conn)
687 {
688 return rds_conn_path_state(&conn->c_path[0]);
689 }
690
691 static inline int
692 rds_conn_path_up(struct rds_conn_path *cp)
693 {
694 return atomic_read(&cp->cp_state) == RDS_CONN_UP;
695 }
696
697 static inline int
698 rds_conn_up(struct rds_connection *conn)
699 {
700 return rds_conn_path_up(&conn->c_path[0]);
701 }
702
703 static inline int
704 rds_conn_path_connecting(struct rds_conn_path *cp)
705 {
706 return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
707 }
708
709 static inline int
710 rds_conn_connecting(struct rds_connection *conn)
711 {
712 return rds_conn_path_connecting(&conn->c_path[0]);
713 }
714
715 /* message.c */
716 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
717 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
718 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
719 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
720 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
721 __be16 dport, u64 seq);
722 int rds_message_add_extension(struct rds_header *hdr,
723 unsigned int type, const void *data, unsigned int len);
724 int rds_message_next_extension(struct rds_header *hdr,
725 unsigned int *pos, void *buf, unsigned int *buflen);
726 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
727 int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
728 void rds_message_inc_free(struct rds_incoming *inc);
729 void rds_message_addref(struct rds_message *rm);
730 void rds_message_put(struct rds_message *rm);
731 void rds_message_wait(struct rds_message *rm);
732 void rds_message_unmapped(struct rds_message *rm);
733
734 static inline void rds_message_make_checksum(struct rds_header *hdr)
735 {
736 hdr->h_csum = 0;
737 hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
738 }
739
740 static inline int rds_message_verify_checksum(const struct rds_header *hdr)
741 {
742 return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
743 }
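/*
 * Typical pairing of the two helpers above (illustrative): the send side
 * stamps the header once it is fully populated, the receive side verifies it
 * before trusting any field.
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);     sender
 *
 *	if (!rds_message_verify_checksum(ihdr))          receiver
 *		drop the incoming message
 */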
744
745
746 /* page.c */
747 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
748 gfp_t gfp);
749 int rds_page_copy_user(struct page *page, unsigned long offset,
750 void __user *ptr, unsigned long bytes,
751 int to_user);
752 #define rds_page_copy_to_user(page, offset, ptr, bytes) \
753 rds_page_copy_user(page, offset, ptr, bytes, 1)
754 #define rds_page_copy_from_user(page, offset, ptr, bytes) \
755 rds_page_copy_user(page, offset, ptr, bytes, 0)
756 void rds_page_exit(void);
757
758 /* recv.c */
759 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
760 __be32 saddr);
761 void rds_inc_put(struct rds_incoming *inc);
762 void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
763 struct rds_incoming *inc, gfp_t gfp);
764 int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
765 int msg_flags);
766 void rds_clear_recv_queue(struct rds_sock *rs);
767 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
768 void rds_inc_info_copy(struct rds_incoming *inc,
769 struct rds_info_iterator *iter,
770 __be32 saddr, __be32 daddr, int flip);
771
772 /* send.c */
773 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
774 void rds_send_reset(struct rds_connection *conn);
775 int rds_send_xmit(struct rds_connection *conn);
776 struct sockaddr_in;
777 void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
778 typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
779 void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
780 is_acked_func is_acked);
781 int rds_send_pong(struct rds_connection *conn, __be16 dport);
782 struct rds_message *rds_send_get_message(struct rds_connection *,
783 struct rm_rdma_op *);
784
785 /* rdma.c */
786 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
787 int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
788 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
789 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
790 void rds_rdma_drop_keys(struct rds_sock *rs);
791 int rds_rdma_extra_size(struct rds_rdma_args *args);
792 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
793 struct cmsghdr *cmsg);
794 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
795 struct cmsghdr *cmsg);
798 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
799 struct cmsghdr *cmsg);
800 void rds_rdma_free_op(struct rm_rdma_op *ro);
801 void rds_atomic_free_op(struct rm_atomic_op *ao);
802 void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
803 void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
804 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
805 struct cmsghdr *cmsg);
806
807 void __rds_put_mr_final(struct rds_mr *mr);
808 static inline void rds_mr_put(struct rds_mr *mr)
809 {
810 if (atomic_dec_and_test(&mr->r_refcount))
811 __rds_put_mr_final(mr);
812 }
813
814 /* stats.c */
815 DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
816 #define rds_stats_inc_which(which, member) do { \
817 per_cpu(which, get_cpu()).member++; \
818 put_cpu(); \
819 } while (0)
820 #define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
821 #define rds_stats_add_which(which, member, count) do { \
822 per_cpu(which, get_cpu()).member += count; \
823 put_cpu(); \
824 } while (0)
825 #define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
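/*
 * The counters are kept per CPU, so callers simply bump the member matching
 * the event (illustrative; "copied" stands for a local byte count):
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_copy_from_user, copied);
 */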
826 int rds_stats_init(void);
827 void rds_stats_exit(void);
828 void rds_stats_info_copy(struct rds_info_iterator *iter,
829 uint64_t *values, const char *const *names,
830 size_t nr);
831
832 /* sysctl.c */
833 int rds_sysctl_init(void);
834 void rds_sysctl_exit(void);
835 extern unsigned long rds_sysctl_sndbuf_min;
836 extern unsigned long rds_sysctl_sndbuf_default;
837 extern unsigned long rds_sysctl_sndbuf_max;
838 extern unsigned long rds_sysctl_reconnect_min_jiffies;
839 extern unsigned long rds_sysctl_reconnect_max_jiffies;
840 extern unsigned int rds_sysctl_max_unacked_packets;
841 extern unsigned int rds_sysctl_max_unacked_bytes;
842 extern unsigned int rds_sysctl_ping_enable;
843 extern unsigned long rds_sysctl_trace_flags;
844 extern unsigned int rds_sysctl_trace_level;
845
846 /* threads.c */
847 int rds_threads_init(void);
848 void rds_threads_exit(void);
849 extern struct workqueue_struct *rds_wq;
850 void rds_queue_reconnect(struct rds_conn_path *cp);
851 void rds_connect_worker(struct work_struct *);
852 void rds_shutdown_worker(struct work_struct *);
853 void rds_send_worker(struct work_struct *);
854 void rds_recv_worker(struct work_struct *);
855 void rds_connect_path_complete(struct rds_conn_path *cp, int curr);
856 void rds_connect_complete(struct rds_connection *conn);
857
858 /* transport.c */
859 int rds_trans_register(struct rds_transport *trans);
860 void rds_trans_unregister(struct rds_transport *trans);
861 struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
862 void rds_trans_put(struct rds_transport *trans);
863 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
864 unsigned int avail);
865 struct rds_transport *rds_trans_get(int t_type);
866 int rds_trans_init(void);
867 void rds_trans_exit(void);
868
869 #endif