/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

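/*
 * Backchannel transport class. This minimal svc_xprt_class carries
 * server-initiated RPCs (e.g. NFSv4.1 callback requests) over the
 * client's existing connection, so it needs no listener of its own
 * and most of its methods are stubs.
 */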
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

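/*
 * WR context management. A svc_rdma_op_ctxt records the pages, SGEs,
 * and DMA mappings associated with one work request so they can be
 * released when the WR completes. Allocation uses __GFP_NOFAIL, so
 * svc_rdma_get_context() never returns NULL; each get must be paired
 * with a svc_rdma_put_context().
 */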
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
				GFP_KERNEL | __GFP_NOFAIL);
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

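/*
 * Return a context to the cache. If free_pages is non-zero, the pages
 * attached to the context are released as well; callers pass 0 when
 * those pages are still in use elsewhere.
 */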
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	map = kmem_cache_alloc(svc_rdma_map_cachep,
			       GFP_KERNEL | __GFP_NOFAIL);
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
		ib_event_msg(event->event), event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context: called from sq_cq_reap() for each
 * completed send-side WR. Releases the context, and any FRMR it
 * carries, according to the opcode of the original WR.
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %s (%d)\n",
					ib_wc_status_msg(wc->status),
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

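/*
 * Send Queue Completion Handler. Like rq_comp_handler, this runs in
 * interrupt context: it only marks the SQ as pending, queues the
 * transport on the global DTO list, and lets the tasklet do the
 * actual reaping.
 */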
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

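/*
 * Post a receive WR large enough for the biggest inline request:
 * one page-sized SGE per page of sc_max_req_size. The WR id carries
 * the op_ctxt pointer so the completion handler can recover it.
 */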
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listen xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

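/*
 * Handles CM events on connected (non-listening) endpoints. This
 * handler is swapped in by svc_rdma_accept(); disconnect and device
 * removal both mark the transport closed and drop the reference the
 * cm_id holds on the svc_xprt.
 */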
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr))
		goto err_free_frmr;

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;

	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		kfree(frmr->sg);
		ib_dereg_mr(frmr->mr);
		kfree(frmr);
	}
}

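/*
 * FRMRs are cached per transport on sc_frmr_q. svc_rdma_get_frmr()
 * reuses a free entry when one is available and falls back to
 * allocating a fresh MR; svc_rdma_put_frmr() unmaps the scatterlist
 * and returns the entry to the queue.
 */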
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->sg_nents = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		atomic_dec(&rdma->sc_dma_used);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_cq_init_attr cq_attr = {};
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr = 0;
	int ret = 0;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)dev->attrs.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_sq_depth;
	newxprt->sc_sq_cq = ib_create_cq(dev,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_max_requests;
	newxprt->sc_rq_cq = ib_create_cq(dev,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		dev, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			dev->attrs.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
	    !(dev->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
		need_dma_mr = 1;
		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
	}

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey = dev->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port      : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    max_sge_rd      : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_max_sge_rd,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

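/*
 * xpo_release_rqst is a no-op for svcrdma: receive resources are
 * tracked in op_ctxts and recycled when those are put, so there is
 * no per-rqstp state to release here.
 */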
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

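/*
 * Transport tear-down. The final svc_xprt_put() can occur in softirq
 * context (the DTO tasklet also drops references), and destroying
 * verbs resources can sleep, so svc_rdma_free() defers the real work
 * to a workqueue item that runs __svc_rdma_free().
 */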
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);

	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

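/*
 * Post a chain of send-side WRs, with SQ flow control. The WR chain
 * is counted, the used-slot count (sc_sq_count) is checked against
 * sc_sq_depth, and the caller sleeps on sc_send_wait until enough
 * slots free up. A transport reference is taken per WR and dropped
 * by sq_cq_reap() as each completion is reaped.
 */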
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

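/*
 * Send an RPC/RDMA protocol error reply: the error is XDR-encoded
 * into a single page, mapped for DMA, and posted as a signaled SEND
 * on the connection's SQ.
 */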
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	__be32 *va;
	int length;
	int ret;

	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}