#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};
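
/*
 * Illustrative sketch only (the in-tree negotiation lives in ib_cm.c):
 * each side advertises the minor versions it speaks as a bitmask and the
 * peers settle on the highest bit common to both masks.  The helper name
 * and the bit-N == minor-N mapping are assumptions for illustration.
 */
static inline unsigned int rds_ib_example_common_minor(__be16 peer_mask)
{
	u16 common = be16_to_cpu(peer_mask) & RDS_IB_SUPPORTED_PROTOCOLS;

	/* fls() is 1-based, so subtract one to get the bit index */
	return common ? fls(common) - 1 : 0;
}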

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32			w_nr;
	u32			w_alloc_ptr;
	u32			w_alloc_ctr;
	u32			w_free_ptr;
	atomic_t		w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64			ack_next;
	u64			ack_recv;
	unsigned int		ack_required:1;
	unsigned int		ack_next_valid:1;
	unsigned int		ack_recv_valid:1;
};
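
/*
 * Sketch only: how a receive completion might fold its ACK information
 * into the state that rds_ib_recv_cqe_handler() fills in while the
 * tasklet drains the CQ.  This helper is hypothetical, not RDS API.
 */
static inline void rds_ib_example_note_ack(struct rds_ib_ack_state *state,
					   u64 seq, int required)
{
	state->ack_recv = seq;		/* highest sequence seen this poll */
	state->ack_recv_valid = 1;
	if (required)
		state->ack_required = 1;
}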

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	dma_addr_t		i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	dma_addr_t		i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them with a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
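
/*
 * Sketch of the lockless update the credit macros support; the real
 * logic is rds_ib_send_grab_credits() in ib_send.c.  This helper is
 * illustrative only.
 */
static inline int rds_ib_example_take_send_credit(atomic_t *credits)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return 0;	/* out of send credits */
		newval = oldval - IB_SET_SEND_CREDITS(1);
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 1;
}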

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
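
/*
 * Typical (hypothetical) use: keep receive allocations NUMA-local to the
 * HCA, e.g.
 *
 *	frag = kmem_cache_alloc_node(frag_slab, gfp,
 *				     rdsibdev_to_node(rds_ibdev));
 */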

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
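
/*
 * Sketch: a send completion handler can use the magic wr_id to tell an
 * ACK completion apart from ordinary sends, e.g.
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID)
 *		rds_ib_ack_send_complete(ic);
 *	else
 *		... look up the rds_ib_send_work by ring index ...
 */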

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device

/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
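
/*
 * Usage sketch (illustrative): callers pass a format string plus
 * arguments, which __rds_ib_conn_error() prints with the "RDS/IB: "
 * prefix, e.g.
 *
 *	rds_ib_conn_error(conn, "send completion error %u\n", wc->status);
 */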

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
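
/*
 * Illustrative ring lifecycle (the real send path in ib_send.c is more
 * involved; the fragment below is only a sketch):
 *
 *	u32 pos;
 *	u32 got = rds_ib_ring_alloc(&ic->i_send_ring, wanted, &pos);
 *
 *	if (got == 0)
 *		... ring full: back off and retry later ...
 *	else post WRs starting at i_sends[pos], and on completion return
 *	the slots with rds_ib_ring_free(&ic->i_send_ring, got);
 */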

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
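
/*
 * Example (hypothetical call site): bump a per-CPU counter when the
 * send ring is exhausted.
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 */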
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;