#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		512

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};
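
/*
 * Illustrative sketch only (the real producers and consumers live in
 * ib_recv.c, and this hypothetical helper is not part of the API):
 * frees land on the current CPU's cache head, and batches of
 * RDS_IB_RECYCLE_BATCH_COUNT entries migrate via the xfer list to
 * ready for refill.  Assumes <linux/percpu.h> is visible here.
 */
static inline struct rds_ib_cache_head *
rds_ib_cache_head_example(struct rds_ib_refill_cache *cache)
{
	/* each CPU refills into its own head, avoiding cross-CPU locking */
	return this_cpu_ptr(cache->percpu);
}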

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
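
/*
 * Illustrative sketch only: both 16-bit counters travel in a single
 * atomic_t, so a reader can snapshot them together.  The real update
 * path is rds_ib_send_grab_credits(), which advances the pair with
 * cmpxchg; this hypothetical helper merely shows the encoding.
 */
static inline void rds_ib_credits_example(atomic_t *credits,
					  unsigned int *send,
					  unsigned int *post)
{
	unsigned int v = atomic_read(credits);

	/* low 16 bits: send credits; high 16 bits: posted recv credits */
	*send = IB_GET_SEND_CREDITS(v);
	*post = IB_GET_POST_CREDITS(v);
}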

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fastreg;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1
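
/*
 * Illustrative only: i_ack_flags is driven with the atomic bitops,
 * along the lines of the IB_ACK_REQUESTED test in rds_ib_attempt_ack().
 * This hypothetical helper is not part of the real API.
 */
static inline int rds_ib_ack_requested_example(struct rds_ib_connection *ic)
{
	/* set when the peer asked for an ack and one hasn't gone out yet */
	return test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}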

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
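
/*
 * Typical call, for illustration only (the format string here is made
 * up): logs the reason at KERN_WARNING and drops the connection.
 *
 *	rds_ib_conn_error(conn, "request failed (%d), disconnecting\n", ret);
 */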

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
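
/*
 * Illustrative sketch only: a caller reserves ring entries up front at
 * position *pos and hands back whatever the post ultimately doesn't
 * consume, roughly:
 *
 *	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, nr, &pos);
 *	if (work_alloc == 0)
 *		... ring full: throttle and retry later ...
 *	...
 *	rds_ib_ring_unalloc(&ic->i_send_ring, unused);
 *
 * See ib_send.c for the real usage.
 */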

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif
ec16227e | 417 | #endif |