/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}
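
/* A note for readers on the credit scheme (based on the IB_GET_ and
 * IB_SET_ helpers used in this file and defined in ib.h): both counters
 * appear to be packed into the single atomic ic->i_credits - send
 * credits in the low 16 bits, newly posted receive credits in the high
 * 16 bits - so a peer's advertisement here and our own posting
 * accounting in rds_ib_cm_fill_conn_param() can each be applied with
 * one atomic operation instead of a lock.
 */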

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connection.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					     dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version));
		rds_conn_destroy(conn);
		return;
	} else {
		pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
			  ic->i_active_side ? "Active" : "Passive",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version),
			  ic->i_flowctl ? ", flow control" : "");
	}

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv. this needs to wait until protocol
	 * negotiation is complete, since ring layout is different
	 * from 3.0 to 3.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* dp structure start is not guaranteed to be 8 bytes aligned.
		 * Since dp_ack_seq is 64-bit extended load operations can be
		 * used so go through get_unaligned to avoid unaligned errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}
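
/* Fill in the rdma_conn_param - and, for RDS, the rds_ib_connect_private
 * handshake block carried in its private_data - that is handed to
 * rdma_accept() on the passive side and rdma_connect() on the active
 * side. The private data advertises our addresses, the protocol version,
 * the supported minor-version mask, a piggybacked ACK sequence number
 * and, when flow control is on, the receive credits posted so far.
 */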
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      struct rds_ib_connect_private *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}
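
/* Note: the wr_id demultiplexing above works because send ring slots use
 * their ring index (0 .. w_nr) as wr_id and the dedicated ACK work request
 * uses the out-of-band RDS_IB_ACK_WR_ID value; anything else completing on
 * the send CQ must therefore be a fast-registration MR work request.
 */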

static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}
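
/* The poll / ib_req_notify_cq() / poll sequence in the tasklets is the
 * usual IB idiom for closing the missed-event window: a completion that
 * lands after the first drain but before the CQ is re-armed would raise
 * no event, so the CQ is polled once more after re-arming it.
 */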

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}
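
/* Note that poll_rcq() only accumulates ACK information in the
 * rds_ib_ack_state it is passed; the tasklet then applies the result
 * once per run (rds_ib_set_ack(), rds_send_drop_acked()) rather than
 * once per completion, batching the ACK bookkeeping across everything
 * drained from the CQ.
 */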

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) "
			 "- connection %pI4->%pI4, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;

	return index;
}
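
/* A linear scan for the least-loaded completion vector: vector_load[]
 * counts how many CQs are currently assigned to each of the device's
 * num_comp_vectors interrupt vectors, and the smallest entry wins, so
 * that the send and receive CQs of different connections spread across
 * vectors instead of piling onto one.
 */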

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * completion queue and send queue. This extra space is used for FRMR
	 * registration and invalidation work requests
	 */
	fr_queue_space = rds_ibdev->use_fastreg ?
			 (RDS_IB_DEFAULT_FR_WR + 1) +
			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
			 : 0;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	rds_ib_dev_put(rds_ibdev);

	return ret;
}
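
/* Error handling in rds_ib_setup_qp() is deliberately minimal: the
 * failure paths above just drop the device reference and return, which
 * appears to rely on rds_ib_conn_path_shutdown() below being safe to run
 * against a partially constructed connection (see the comment above that
 * function).
 */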

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
		       "rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof(*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else {
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				   &dp->dp_saddr,
				   dp->dp_protocol_major,
				   dp->dp_protocol_minor);
	}
	return version;
}
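
/* Worked example of the minor-version negotiation above, assuming
 * RDS_IB_SUPPORTED_PROTOCOLS covers minors 0 and 1 (mask 0x0003, as in
 * ib.h): a peer advertising dp_protocol_minor_mask = 0x0003 yields
 * common = 0x3; the loop shifts once before reaching zero, bumping
 * version from 3.0 to 3.1 - i.e. the highest minor both sides support.
 */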

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
		 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else
		if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	if (err)
		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}
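
/* The return value feeds rdma_cm's destroy logic: rds_ib_cm_handle_connect()
 * starts with destroy = 1 so that a rejected or raced request tears down
 * the passive cm_id, and clears it only once the connection has taken
 * ownership of the cm_id (see the matching "Beware" comment in
 * rds_ib_cm_initiate_connect() below).
 */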

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
				  UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shut down new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		atomic_set(&ic->i_cq_quiesce, 1);
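
		/* At this point the rings have drained and both tasklets
		 * have been killed; with i_cq_quiesce set, a completion
		 * handler that still fires merely schedules a tasklet run
		 * that bails out at the atomic_read() checks above, so it
		 * is now safe to destroy the CQs and QP below.
		 */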

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)