/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);
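
/* All svcrdma transports share a single data-transfer-operation (DTO)
 * tasklet. A transport with a pending SQ or RQ completion is chained
 * on dto_xprt_q under dto_lock until dto_tasklet_func() reaps it.
 */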

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
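
/* An svc_rdma_op_ctxt tracks the resources of a single WR: its pages,
 * SGEs, and DMA mappings. Contexts are pre-allocated at accept time
 * and recycled through xprt->sc_ctxts so the hot path can usually
 * avoid calling the allocator.
 */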

static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
					   gfp_t flags)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmalloc(sizeof(*ctxt), flags);
	if (ctxt) {
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->free);
		INIT_LIST_HEAD(&ctxt->dto_q);
	}
	return ctxt;
}

static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* Each RPC/RDMA credit can consume a number of send
	 * and receive WQEs. One ctxt is allocated for each.
	 */
	i = xprt->sc_sq_depth + xprt->sc_rq_depth;

	while (i--) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = alloc_ctxt(xprt, GFP_KERNEL);
		if (!ctxt) {
			dprintk("svcrdma: No memory for RDMA ctxt\n");
			return false;
		}
		list_add(&ctxt->free, &xprt->sc_ctxts);
	}
	return true;
}

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock_bh(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used++;
	if (list_empty(&xprt->sc_ctxts))
		goto out_empty;

	ctxt = list_first_entry(&xprt->sc_ctxts,
				struct svc_rdma_op_ctxt, free);
	list_del_init(&ctxt->free);
	spin_unlock_bh(&xprt->sc_ctxt_lock);

out:
	ctxt->count = 0;
	ctxt->frmr = NULL;
	return ctxt;

out_empty:
	/* Either pre-allocation missed the mark, or send
	 * queue accounting is broken.
	 */
	spin_unlock_bh(&xprt->sc_ctxt_lock);

	ctxt = alloc_ctxt(xprt, GFP_NOIO);
	if (ctxt)
		goto out;

	spin_lock_bh(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
	return NULL;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->free, &xprt->sc_ctxts);
	spin_unlock_bh(&xprt->sc_ctxt_lock);
}

static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxts)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, free);
		list_del(&ctxt->free);
		kfree(ctxt);
	}
}
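
/* An svc_rdma_req_map caches the scatter/gather state used while a
 * reply is being constructed. Like op_ctxts, maps are pre-allocated
 * per connection and recycled through xprt->sc_maps.
 */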

static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
{
	struct svc_rdma_req_map *map;

	map = kmalloc(sizeof(*map), flags);
	if (map)
		INIT_LIST_HEAD(&map->free);
	return map;
}

static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* One for each receive buffer on this connection. */
	i = xprt->sc_max_requests;

	while (i--) {
		struct svc_rdma_req_map *map;

		map = alloc_req_map(GFP_KERNEL);
		if (!map) {
			dprintk("svcrdma: No memory for request map\n");
			return false;
		}
		list_add(&map->free, &xprt->sc_maps);
	}
	return true;
}

struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_req_map *map = NULL;

	spin_lock(&xprt->sc_map_lock);
	if (list_empty(&xprt->sc_maps))
		goto out_empty;

	map = list_first_entry(&xprt->sc_maps,
			       struct svc_rdma_req_map, free);
	list_del_init(&map->free);
	spin_unlock(&xprt->sc_map_lock);

out:
	map->count = 0;
	return map;

out_empty:
	spin_unlock(&xprt->sc_map_lock);

	/* Pre-allocation amount was incorrect */
	map = alloc_req_map(GFP_NOIO);
	if (map)
		goto out;

	WARN_ONCE(1, "svcrdma: empty request map list?\n");
	return NULL;
}

void svc_rdma_put_req_map(struct svcxprt_rdma *xprt,
			  struct svc_rdma_req_map *map)
{
	spin_lock(&xprt->sc_map_lock);
	list_add(&map->free, &xprt->sc_maps);
	spin_unlock(&xprt->sc_map_lock);
}

static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_maps)) {
		struct svc_rdma_req_map *map;

		map = list_first_entry(&xprt->sc_maps,
				       struct svc_rdma_req_map, free);
		list_del(&map->free);
		kfree(map);
	}
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
		ib_event_msg(event->event), event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}
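
/* Note: dto_tasklet_func drops dto_lock before calling the CQ reapers,
 * so rq_cq_reap() and sq_cq_reap() may take their own bh-safe locks
 * and the completion handlers may re-queue the transport meanwhile.
 */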

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	struct svc_rdma_op_ctxt *read_hdr;
	int free_pages = 0;

	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		free_pages = 1;
		break;

	case IB_WR_RDMA_WRITE:
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);

		if (!test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags))
			break;

		read_hdr = ctxt->read_hdr;
		svc_rdma_put_context(ctxt, 0);

		spin_lock_bh(&xprt->sc_rq_dto_lock);
		set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
		list_add_tail(&read_hdr->dto_q,
			      &xprt->sc_read_complete_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_enqueue(&xprt->sc_xprt);
		return;

	default:
		dprintk("svcrdma: unexpected completion opcode=%d\n",
			ctxt->wr_op);
		break;
	}

	svc_rdma_put_context(ctxt, free_pages);
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %s (%d)\n",
					ib_wc_status_msg(wc->status),
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_maps);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_map_lock);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
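
/*
 * Post a receive WR built from enough pages to hold the largest
 * inline RPC the peer may send (sc_max_req_size). On any failure the
 * partially built context and its DMA mappings are torn down.
 */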
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(flags);
		if (!page)
			goto err_put_ctxt;
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	svc_xprt_put(&cma_xprt->sc_xprt);
	return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr))
		goto err_free_frmr;

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;

	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		kfree(frmr->sg);
		ib_dereg_mr(frmr->mr);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->sg_nents = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		atomic_dec(&rdma->sc_dma_used);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}
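
/* FRMRs are recycled through rdma->sc_frmr_q rather than deregistered
 * after each use; rdma_dealloc_frmr_q() frees the queue when the
 * transport itself is destroyed.
 */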

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_cq_init_attr cq_attr = {};
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr = 0;
	unsigned int i;
	int ret = 0;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
					 svcrdma_max_requests);
	newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
					    svcrdma_max_bc_requests);
	newxprt->sc_rq_depth = newxprt->sc_max_requests +
			       newxprt->sc_max_bc_requests;
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;

	if (!svc_rdma_prealloc_ctxts(newxprt))
		goto errout;
	if (!svc_rdma_prealloc_maps(newxprt))
		goto errout;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_sq_depth;
	newxprt->sc_sq_cq = ib_create_cq(dev,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_rq_depth;
	newxprt->sc_rq_cq = ib_create_cq(dev,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		dev, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			dev->attrs.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
	    !(dev->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
		need_dma_mr = 1;
		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
	}

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey = dev->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_rq_depth; i++) {
		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port      : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    max_sge_rd      : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_max_sge_rd,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return ERR_PTR(-ENOMEM);
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&xprt->xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&xprt->xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (rdma->sc_ctxt_used != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       rdma->sc_ctxt_used);
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	rdma_dealloc_frmr_q(rdma);
	svc_rdma_destroy_ctxts(rdma);
	svc_rdma_destroy_maps(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}
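
/* Illustrative caller sketch (not part of this file): a sender builds
 * an ib_send_wr backed by an svc_rdma_op_ctxt and hands the head of
 * the WR chain to svc_rdma_send(), which does the SQ accounting, as
 * svc_rdma_send_error() below does for a single SEND:
 *
 *	memset(&send_wr, 0, sizeof(send_wr));
 *	ctxt->wr_op = IB_WR_SEND;
 *	send_wr.wr_id = (unsigned long)ctxt;
 *	send_wr.sg_list = ctxt->sge;
 *	send_wr.num_sge = sge_no;
 *	send_wr.opcode = IB_WR_SEND;
 *	send_wr.send_flags = IB_SEND_SIGNALED;
 *	ret = svc_rdma_send(rdma, &send_wr);
 */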

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}