/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
					   gfp_t flags)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmalloc(sizeof(*ctxt), flags);
	if (ctxt) {
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->list);
	}
	return ctxt;
}

static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* Each RPC/RDMA credit can consume a number of send
	 * and receive WQEs. One ctxt is allocated for each.
	 */
	i = xprt->sc_sq_depth + xprt->sc_rq_depth;

	while (i--) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = alloc_ctxt(xprt, GFP_KERNEL);
		if (!ctxt) {
			dprintk("svcrdma: No memory for RDMA ctxt\n");
			return false;
		}
		list_add(&ctxt->list, &xprt->sc_ctxts);
	}
	return true;
}

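/**
 * svc_rdma_get_context - Pull a free op_ctxt off the transport's free list
 * @xprt: transport that will own the returned context
 *
 * Normally served from the contexts pre-allocated at accept time.
 * If the free list is unexpectedly empty, fall back to a GFP_NOIO
 * allocation so the caller can still make progress.
 */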
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used++;
	if (list_empty(&xprt->sc_ctxts))
		goto out_empty;

	ctxt = list_first_entry(&xprt->sc_ctxts,
				struct svc_rdma_op_ctxt, list);
	list_del(&ctxt->list);
	spin_unlock(&xprt->sc_ctxt_lock);

out:
	ctxt->count = 0;
	ctxt->mapped_sges = 0;
	ctxt->frmr = NULL;
	return ctxt;

out_empty:
	/* Either pre-allocation missed the mark, or send
	 * queue accounting is broken.
	 */
	spin_unlock(&xprt->sc_ctxt_lock);

	ctxt = alloc_ctxt(xprt, GFP_NOIO);
	if (ctxt)
		goto out;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	spin_unlock(&xprt->sc_ctxt_lock);
	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
	return NULL;
}

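/**
 * svc_rdma_unmap_dma - DMA-unmap the SGEs that were mapped for this context
 * @ctxt: context whose pages are to be unmapped
 *
 * Only SGEs carrying the PD's local_dma_lkey are unmapped here;
 * FRMR-registered SGEs are torn down when their MR is invalidated.
 */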
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	u32 lkey = xprt->sc_pd->local_dma_lkey;
	unsigned int i;

	for (i = 0; i < ctxt->mapped_sges; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the local_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == lkey)
			ib_dma_unmap_page(device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
	}
	ctxt->mapped_sges = 0;
}

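/**
 * svc_rdma_put_context - Return an op_ctxt to the transport's free list
 * @ctxt: context being released
 * @free_pages: non-zero if the pages attached to the context should
 *	also be released
 */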
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->list, &xprt->sc_ctxts);
	spin_unlock(&xprt->sc_ctxt_lock);
}

static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxts)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		kfree(ctxt);
	}
}

static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
{
	struct svc_rdma_req_map *map;

	map = kmalloc(sizeof(*map), flags);
	if (map)
		INIT_LIST_HEAD(&map->free);
	return map;
}

static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* One for each receive buffer on this connection. */
	i = xprt->sc_max_requests;

	while (i--) {
		struct svc_rdma_req_map *map;

		map = alloc_req_map(GFP_KERNEL);
		if (!map) {
			dprintk("svcrdma: No memory for request map\n");
			return false;
		}
		list_add(&map->free, &xprt->sc_maps);
	}
	return true;
}

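/**
 * svc_rdma_get_req_map - Pull a free request map off the transport's free list
 * @xprt: transport that owns the map cache
 *
 * Like svc_rdma_get_context(), falls back to a GFP_NOIO allocation
 * when the pre-allocated pool has been exhausted.
 */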
struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_req_map *map = NULL;

	spin_lock(&xprt->sc_map_lock);
	if (list_empty(&xprt->sc_maps))
		goto out_empty;

	map = list_first_entry(&xprt->sc_maps,
			       struct svc_rdma_req_map, free);
	list_del_init(&map->free);
	spin_unlock(&xprt->sc_map_lock);

out:
	map->count = 0;
	return map;

out_empty:
	spin_unlock(&xprt->sc_map_lock);

	/* Pre-allocation amount was incorrect */
	map = alloc_req_map(GFP_NOIO);
	if (map)
		goto out;

	WARN_ONCE(1, "svcrdma: empty request map list?\n");
	return NULL;
}

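/**
 * svc_rdma_put_req_map - Return a request map to the transport's free list
 * @xprt: transport that owns the map cache
 * @map: map being released
 */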
void svc_rdma_put_req_map(struct svcxprt_rdma *xprt,
			  struct svc_rdma_req_map *map)
{
	spin_lock(&xprt->sc_map_lock);
	list_add(&map->free, &xprt->sc_maps);
	spin_unlock(&xprt->sc_map_lock);
}

static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_maps)) {
		struct svc_rdma_req_map *map;

		map = list_first_entry(&xprt->sc_maps,
				       struct svc_rdma_req_map, free);
		list_del(&map->free);
		kfree(map);
	}
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->byte_len = wc->byte_len;
	spin_lock(&xprt->sc_rq_dto_lock);
	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
	spin_unlock(&xprt->sc_rq_dto_lock);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		goto out;
	svc_xprt_enqueue(&xprt->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_warn("svcrdma: receive: %s (%u/0x%x)\n",
			ib_wc_status_msg(wc->status),
			wc->status, wc->vendor_err);
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	svc_rdma_put_context(ctxt, 1);

out:
	svc_xprt_put(&xprt->sc_xprt);
}

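/* Handling common to every Send-class completion: replenish the SQ
 * accounting and wake any sender waiting for SQ space. On error,
 * report the completion status and mark the transport for closing.
 */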
static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
				    struct ib_wc *wc,
				    const char *opname)
{
	if (wc->status != IB_WC_SUCCESS)
		goto err;

out:
	atomic_inc(&xprt->sc_sq_avail);
	wake_up(&xprt->sc_send_wait);
	return;

err:
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: %s: %s (%u/0x%x)\n",
		       opname, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	goto out;
}

static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
					const char *opname)
{
	struct svcxprt_rdma *xprt = cq->cq_context;

	svc_rdma_send_wc_common(xprt, wc, opname);
	svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common_put(cq, wc, "send");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
}

/**
 * svc_rdma_wc_write - Invoked by RDMA provider for each polled Write WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_write(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common_put(cq, wc, "write");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
}

/**
 * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
{
	svc_rdma_send_wc_common_put(cq, wc, "fastreg");
}

/**
 * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	svc_rdma_send_wc_common(xprt, wc, "read");

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(xprt, ctxt->frmr);

	if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
		struct svc_rdma_op_ctxt *read_hdr;

		read_hdr = ctxt->read_hdr;
		spin_lock(&xprt->sc_rq_dto_lock);
		list_add_tail(&read_hdr->list,
			      &xprt->sc_read_complete_q);
		spin_unlock(&xprt->sc_rq_dto_lock);

		set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&xprt->sc_xprt);
	}

	svc_rdma_put_context(ctxt, 0);
	svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
{
	svc_rdma_send_wc_common_put(cq, wc, "localInv");
}

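/* Allocate and initialize an svcxprt_rdma, including its lists,
 * locks, and wait queue. The caller indicates whether this transport
 * will act as a listener.
 */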
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_maps);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_map_lock);

	/*
	 * Note that this implies that the underlying transport support
	 * has some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

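/**
 * svc_rdma_post_recv - Post a Receive WR carrying freshly allocated pages
 * @xprt: transport on which to post
 * @flags: GFP flags for the page allocations
 *
 * Maps enough pages to cover sc_max_req_size, then hands the Receive
 * WR to the provider. On failure, the context and its pages are
 * released before returning.
 */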
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->cqe.done = svc_rdma_wc_receive;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(flags);
		if (!page)
			goto err_put_ctxt;
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		svc_rdma_count_mappings(xprt, ctxt);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_cqe = &ctxt->cqe;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	int ret = 0;

	ret = svc_rdma_post_recv(xprt, flags);
	if (ret) {
		pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
		       ret);
		pr_err("svcrdma: closing transport %p.\n", xprt);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		ret = -ENOTCONN;
	}
	return ret;
}

static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

err1:
	rdma_destroy_id(listen_id);
err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

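/* Allocate a fast-registration MR and its scatterlist for this
 * transport. Returns ERR_PTR(-ENOMEM) if any piece cannot be
 * allocated.
 */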
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr))
		goto err_free_frmr;

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;

	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

err_free_mr:
	ib_dereg_mr(mr);
err_free_frmr:
	kfree(frmr);
err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		kfree(frmr->sg);
		ib_dereg_mr(frmr->mr);
		kfree(frmr);
	}
}

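/**
 * svc_rdma_get_frmr - Acquire an FRMR for this transport
 * @rdma: transport that owns the FRMR free list
 *
 * Reuses an FRMR from sc_frmr_q when one is available; otherwise a
 * new one is allocated.
 */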
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->sg_nents = 0;
	}
	spin_unlock(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		spin_lock(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	struct sockaddr *sap;
	unsigned int i;
	int ret = 0;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
					 svcrdma_max_requests);
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
	newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
					    svcrdma_max_bc_requests);
	newxprt->sc_rq_depth = newxprt->sc_max_requests +
			       newxprt->sc_max_bc_requests;
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	if (!svc_rdma_prealloc_ctxts(newxprt))
		goto errout;
	if (!svc_rdma_prealloc_maps(newxprt))
		goto errout;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			dev->attrs.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	} else
		newxprt->sc_snd_w_inv = false;

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
	dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", newxprt->sc_ord);

	return &newxprt->sc_xprt;

errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, rdma);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	/* We should only be called from kref_put */
	if (kref_read(&xprt->xpt_ref) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       kref_read(&xprt->xpt_ref));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_read_complete_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_rq_dto_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (rdma->sc_ctxt_used != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       rdma->sc_ctxt_used);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	rdma_dealloc_frmr_q(rdma);
	svc_rdma_destroy_ctxts(rdma);
	svc_rdma_destroy_maps(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}

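/**
 * svc_rdma_send - Post a chain of Send WRs, blocking while the SQ is full
 * @xprt: transport on which to post
 * @wr: head of the WR chain to post
 *
 * A transport reference is taken for each WR posted; the Send
 * completion handlers release them. Returns a negative errno if the
 * transport is closing or the post fails.
 */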
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);

			/* Wait until SQ WR available if SQ still full */
			atomic_add(wr_count, &xprt->sc_sq_avail);
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_avail) > wr_count);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
			dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
				atomic_read(&xprt->sc_sq_avail),
				xprt->sc_sq_depth);
			wake_up(&xprt->sc_send_wait);
		}
		break;
	}
	return ret;
}