#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2

#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
#define RDS_IB_DEFAULT_FR_WR 256
#define RDS_IB_DEFAULT_FR_INV_WR 256

#define RDS_IB_DEFAULT_RETRY_COUNT 1

#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT 32

#define RDS_IB_WC_MAX 32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};

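/*
 * Illustrative sketch, not part of the original header: a plausible
 * per-CPU producer path for the refill cache above.  Freed frags/incs
 * are chained onto the local CPU's cache head; batches are later
 * handed over via 'xfer' and consumed from 'ready'.  The helper name
 * is hypothetical; the real logic lives in ib_recv.c (and also
 * disables interrupts around the per-CPU update).
 */
static inline void rds_ib_cache_put_sketch(struct rds_ib_refill_cache *cache,
					   struct list_head *new_item)
{
	struct rds_ib_cache_head *chp = this_cpu_ptr(cache->percpu);

	if (!chp->first)
		INIT_LIST_HEAD(new_item);	/* start a new chain */
	else
		list_add_tail(new_item, chp->first);
	chp->first = new_item;
	chp->count++;
}
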
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask; /* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit; /* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void *s_op;
	union {
		struct ib_send_wr s_wr;
		struct ib_rdma_wr s_rdma_wr;
		struct ib_atomic_wr s_atomic_wr;
	};
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

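/*
 * Illustrative sketch, not part of the original header: ring occupancy
 * falls out of the two counters alone.  Allocations advance
 * w_alloc_ctr, completions advance w_free_ctr, and unsigned
 * subtraction stays correct across u32 wraparound.  The helper name is
 * hypothetical; the real accounting lives in ib_ring.c.
 */
static inline u32 rds_ib_ring_used_sketch(struct rds_ib_work_ring *ring)
{
	return ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
}
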
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};

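/*
 * Illustrative sketch, not part of the original header: the recv
 * completion path batches ACK decisions into one on-stack
 * rds_ib_ack_state per polling pass instead of updating connection
 * state per completion.  The helper name is hypothetical.
 */
static inline int rds_ib_ack_state_wants_ack_sketch(const struct rds_ib_ack_state *state)
{
	/* only meaningful if the handler latched a next sequence */
	return state->ack_next_valid && state->ack_required;
}
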
struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;
	struct ib_wc i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t i_fastreg_wrs;
	atomic_t i_fastunreg_wrs;

	/* interrupt handling */
	struct tasklet_struct i_send_tasklet;
	struct tasklet_struct i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t i_cache_allocs;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;	/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool i_active_side;

	/* Send/Recv vectors */
	int i_scq_vector;
	int i_rcq_vector;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v) ((v) >> 16)
#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v) ((v) << 16)

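/*
 * Illustrative sketch, not part of the original header: how a sender
 * might atomically claim send credits from the packed i_credits word
 * using the macros above.  Because send credits live in the low 16
 * bits and availability is verified first, the subtraction cannot
 * borrow into the posted-credit half.  The helper name and policy are
 * hypothetical; the real logic is rds_ib_send_grab_credits() in
 * ib_send.c.
 */
static inline u32 rds_ib_claim_send_credits_sketch(atomic_t *credits, u32 wanted)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) < wanted)
			return 0;	/* not enough send credits */
		newval = oldval - IB_SET_SEND_CREDITS(wanted);
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return wanted;
}
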
struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
	struct rcu_head rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	bool has_fmr;
	bool has_fr;
	bool use_fastreg;

	unsigned int max_mrs;
	struct rds_ib_mr_pool *mr_1m_pool;
	struct rds_ib_mr_pool *mr_8k_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_8k_mrs;
	unsigned int max_1m_mrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock;	/* protect the above */
	atomic_t refcount;
	struct work_struct free_work;
	int *vector_load;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID (~(u64) 0)

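/*
 * Illustrative sketch, not part of the original header: i_ack_flags is
 * meant for atomic bitops, so only one ACK WR is in flight at a time
 * and a request that arrives meanwhile is remembered for a retry.  The
 * helper name is hypothetical; see rds_ib_attempt_ack() in ib_recv.c
 * for the real sequence.
 */
static inline int rds_ib_try_post_ack_sketch(unsigned long *flags)
{
	if (test_and_set_bit(IB_ACK_IN_FLIGHT, flags))
		return 0;	/* one is already posted; try again later */
	clear_bit(IB_ACK_REQUESTED, flags);
	return 1;		/* caller may post the ACK WR now */
}
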
struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_evt_handler_call;
	uint64_t s_ib_tasklet_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_total_frags;
	uint64_t s_ib_rx_total_incs;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_8k_alloc;
	uint64_t s_ib_rdma_mr_8k_free;
	uint64_t s_ib_rdma_mr_8k_used;
	uint64_t s_ib_rdma_mr_8k_pool_flush;
	uint64_t s_ib_rdma_mr_8k_pool_wait;
	uint64_t s_ib_rdma_mr_8k_pool_depleted;
	uint64_t s_ib_rdma_mr_1m_alloc;
	uint64_t s_ib_rdma_mr_1m_free;
	uint64_t s_ib_rdma_mr_1m_used;
	uint64_t s_ib_rdma_mr_1m_pool_flush;
	uint64_t s_ib_rdma_mr_1m_pool_wait;
	uint64_t s_ib_rdma_mr_1m_pool_depleted;
	uint64_t s_ib_rdma_mr_8k_reused;
	uint64_t s_ib_rdma_mr_1m_reused;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
	uint64_t s_ib_recv_added_to_cache;
	uint64_t s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
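
/*
 * Example use (hypothetical call site): the wrapper both logs with a
 * KERN_WARNING "RDS/IB: " prefix and drops the connection, so a
 * completion handler can do, e.g.:
 *
 *	rds_ib_conn_error(conn, "send completion had status %u\n",
 *			  wc->status);
 *
 * The __printf(2, 3) annotation on __rds_ib_conn_error() lets the
 * compiler type-check the format string.
 */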

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
	rds_stats_add_which(rds_ib_stats, member, count)
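
/*
 * Example use (hypothetical call sites): the counters are per-CPU, so
 * these helpers are cheap enough for the data path:
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 *	rds_ib_stats_add(s_ib_rx_total_frags, nr_frags);
 */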
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif