/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

/*
 * handle replies in tasklet context, using a single, global list
 * rdma tasklet function -- just turn around and call the func
 * for all replies on the list
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static void
rpcrdma_schedule_tasklet(struct list_head *sched_list)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_splice_tail(sched_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}

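/* The upcall side runs in interrupt context, so it only queues work:
 * completed rpcrdma_rep structures are collected on a private list and
 * spliced onto rpcrdma_tasklets_g here. rpcrdma_run_tasklet() then
 * drains that global list in softirq context, calling each rep's
 * rr_func, or returning the buffer via rpcrdma_recv_buffer_put() when
 * no handler is set.
 */
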
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
	       event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
	       event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC:       %s: SEND: %s\n",
			       __func__, ib_wc_status_msg(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->mw_sendcompletion(wc);
	}
}

static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

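/* Polling is budgeted: at most RPCRDMA_WC_BUDGET completions are
 * reaped per upcall, in batches of RPCRDMA_POLLSIZE. A short batch
 * (rc < RPCRDMA_POLLSIZE) means the CQ is empty and the loop exits
 * early; a full batch with budget remaining polls again. For example,
 * if the budget were 128 and the poll size 8 (numbers illustrative
 * only), the loop would run at most 16 times before returning so the
 * CQ can be re-armed.
 */
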
/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

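/* Re-arming follows the standard verbs idiom: poll until empty, re-arm
 * with IB_CQ_REPORT_MISSED_EVENTS, and if ib_req_notify_cq() returns a
 * positive value (completions may have arrived between the final poll
 * and the re-arm), poll once more so nothing is stranded on the CQ.
 * The same sequence appears in rpcrdma_recvcq_upcall() below.
 */
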
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC:       %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

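/* Both CQs are drained synchronously here, one completion at a time.
 * Callers (rpcrdma_ep_connect() on the reconnect path, and
 * rpcrdma_ep_disconnect()) use this so that no flushed completion
 * still references a rep or MW that is about to be reset or reused.
 */
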
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_id->device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

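/* ep->rep_connected encodes connection state for waiters on
 * rep_connect_wait: 0 while a connect is in progress, 1 once
 * RDMA_CM_EVENT_ESTABLISHED arrives, and a negative errno for any
 * failure or disconnect event.
 */
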
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

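/* Address and route resolution are both asynchronous: each
 * rdma_resolve_*() call completes via rpcrdma_conn_upcall(), which
 * stores the result in ri_async_rc and signals ri_done. The -ETIMEDOUT
 * preset is what the caller sees if the wait above expires before the
 * CM delivers an event.
 */
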
/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_id->device, devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if (((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
		      (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_ALLPHYSICAL;
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out3;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC:       %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

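/* Strategy selection above degrades gracefully: a caller asking for
 * RPCRDMA_FRMR falls back to RPCRDMA_MTHCAFMR if the device lacks
 * fast-registration support, and then to RPCRDMA_ALLPHYSICAL if it
 * cannot allocate FMRs. The chosen mode is recorded in both ia->ri_ops
 * (the per-strategy method table) and ia->ri_memreg_strategy.
 */
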
/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}
	if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
		rc = ib_dealloc_pd(ia->ri_pd);
		dprintk("RPC:       %s: ib_dealloc_pd returned %i\n",
			__func__, rc);
	}
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	struct ib_cq_init_attr cq_attr = {};
	int rc, err;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	if (cdata->padding) {
		ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
						      GFP_KERNEL);
		if (IS_ERR(ep->rep_padbuf))
			return PTR_ERR(ep->rep_padbuf);
	} else
		ep->rep_padbuf = NULL;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
	return rc;
}

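/* The send-completion trigger computed above is consumed by
 * DECR_CQCOUNT() in rpcrdma_ep_post(): a signaled SEND is forced
 * whenever the countdown initialized from rep_cqinit is exhausted,
 * and all other SENDs are unsignaled. As a worked example (numbers
 * illustrative only): with max_send_wr of 64 the raw trigger is
 * 64/2 - 1 = 31 unsignaled SENDs between signaled ones, subject to
 * an upper clamp of RPCRDMA_MAX_UNSIGNALED_SENDS; a tiny queue
 * (trigger <= 2) disables suppression so every SEND is signaled.
 */
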
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rpcrdma_free_regbuf(ia, ep->rep_padbuf);

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		ia->ri_ops->ro_reset(xprt);

		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC:       %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

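/* Two retry policies are layered above: a plain retry for
 * -ECONNREFUSED (no listener yet, possibly transient), and a
 * parameter-adjusting retry that forces initiator_depth and
 * responder_resources to the same nonzero value, since some remote
 * CMs only accept connections when ORD and IRD match.
 */
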
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_buffer = &r_xprt->rx_buf;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	char *p;
	size_t len;
	int i, rc;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);

	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 * Send/recv buffers in req/rep need to be registered
	 */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));

	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC:       %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		buf->rb_send_bufs[i] = req;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		buf->rb_recv_bufs[i] = rep;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	if (!rep)
		return;

	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	if (!req)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	int i;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  MWs
	 */
	dprintk("RPC:       %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs)
			rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
		if (buf->rb_send_bufs)
			rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
	}

	ia->ri_ops->ro_destroy(buf);

	kfree(buf->rb_pool);
}

/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
 * some req segments uninitialized.
 */
static void
rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
{
	if (*mw) {
		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
		*mw = NULL;
	}
}

/* Cycle mw's back in reverse order, and "spin" them.
 * This delays and scrambles reuse as much as possible.
 */
static void
rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_mr_seg *seg1 = seg;
	int i;

	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
		rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
	rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
}

static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply->rr_func = NULL;
		req->rl_reply = NULL;
	}
}

/* rpcrdma_unmap_one() was already done during deregistration.
 * Redo only the ib_post_send().
 */
static void
rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	dprintk("RPC:       %s: FRMR %p is stale\n", __func__, r);

	/* When this FRMR is re-inserted into rb_mws, it is no longer stale */
	r->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)r;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	dprintk("RPC:       %s: frmr %p invalidating rkey %08x\n",
		__func__, r, r->r.frmr.fr_mr->rkey);

	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc) {
		/* Force rpcrdma_buffer_get() to retry */
		r->r.frmr.fr_state = FRMR_IS_STALE;
		dprintk("RPC:       %s: ib_post_send failed, %i\n",
			__func__, rc);
	}
}

static void
rpcrdma_retry_flushed_linv(struct list_head *stale,
			   struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct list_head *pos;
	struct rpcrdma_mw *r;
	unsigned long flags;

	list_for_each(pos, stale) {
		r = list_entry(pos, struct rpcrdma_mw, mw_list);
		rpcrdma_retry_local_inv(r, ia);
	}

	spin_lock_irqsave(&buf->rb_lock, flags);
	list_splice_tail(stale, &buf->rb_mws);
	spin_unlock_irqrestore(&buf->rb_lock, flags);
}

static struct rpcrdma_req *
rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
			 struct list_head *stale)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		if (r->r.frmr.fr_state == FRMR_IS_STALE) {
			list_add(&r->mw_list, stale);
			continue;
		}
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	struct list_head stale;
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC:       %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	INIT_LIST_HEAD(&stale);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
		break;
	case RPCRDMA_MTHCAFMR:
		req = rpcrdma_buffer_get_fmrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	if (!list_empty(&stale))
		rpcrdma_retry_flushed_linv(&stale, buffers);
	return req;
}

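/* A minimal usage sketch (caller-side, not code from this file): the
 * send path pairs these calls around each RPC, along these lines:
 *
 *	struct rpcrdma_req *req;
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (req == NULL)
 *		return -ENOMEM;		(pool exhausted)
 *	... marshal and post via rpcrdma_ep_post() ...
 *	rpcrdma_buffer_put(req);
 */
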
/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		rpcrdma_buffer_put_mrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = rep->rr_buffer;
	unsigned long flags;

	rep->rr_func = NULL;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC:       map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

static int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
			  struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_id->device,
			va, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
		return -ENOMEM;

	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC:       %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
		__func__, (unsigned long long)ipb.addr,
		(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		*mrp = NULL;
		rc = PTR_ERR(mr);
		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
	} else {
		*mrp = mr;
		iov->lkey = mr->lkey;
		rc = 0;
	}

	return rc;
}

static int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
		struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (NULL == mr)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	int rc;

	rc = -ENOMEM;
	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	rb->rg_size = size;
	rb->rg_owner = NULL;
	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
				       &rb->rg_mr, &rb->rg_iov);
	if (rc)
		goto out_free;

	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(rc);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb) {
		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
		kfree(rb);
	}
}

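/* Usage sketch: rpcrdma_create_rep() above allocates its receive
 * buffer this way, e.g.
 *
 *	rb = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *
 * and the matching rpcrdma_free_regbuf() calls appear in
 * rpcrdma_destroy_rep() and rpcrdma_destroy_req().
 */
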
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
		ib_dma_sync_single_for_device(ia->ri_id->device,
			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
			DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
		DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
		DMA_TO_DEVICE);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}

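/* The receive buffer is posted before the send because the server may
 * reply as soon as the SEND is on the wire; posting it afterward would
 * risk the reply arriving with no receive WR to land in (an RNR
 * condition). The DECR_CQCOUNT() test implements the unsignaled-send
 * policy set up in rpcrdma_ep_create().
 */
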
/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC:       %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC:       %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}
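
/* Worked example (sizes illustrative, not taken from this tree): with
 * 1024-byte inline buffers, a 28-byte minimum header, and a 16-byte
 * struct rpcrdma_segment, bytes = 996 and bytes / 16 = 62; rounding
 * down to a power of two via fls() gives 1 << (6 - 1) = 32 segments.
 */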