/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
/*
 * Handle replies in tasklet context, using a single, global list.
 * The rdma tasklet function simply walks the list and calls the
 * reply handler for each reply queued on it.
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	unsigned long flags;

	data = data;	/* the tasklet data argument is unused */
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		rpcrdma_reply_handler(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static void
rpcrdma_schedule_tasklet(struct list_head *sched_list)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_splice_tail(sched_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}
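
/*
 * Illustrative overview (derived from the code in this file, not from
 * separate documentation): a receive completion travels roughly along
 * this path before the RPC reply handler sees it:
 *
 *   rpcrdma_recvcq_upcall()             (provider completion upcall)
 *     rpcrdma_recvcq_poll()
 *       rpcrdma_recvcq_process_wc()      queue each rep on a local list
 *       rpcrdma_schedule_tasklet()       splice the list, kick tasklet
 *   rpcrdma_run_tasklet()               (softirq context)
 *     rpcrdma_reply_handler(rep)         for each reply, in arrival order
 */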

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC: %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
	       event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC: %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
	       event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC: %s: SEND: %s\n",
			       __func__, ib_wc_status_msg(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->mw_sendcompletion(wc);
	}
}

static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}
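
/*
 * A sketch of the polling arithmetic above, with purely illustrative
 * values (the real constants live in xprt_rdma.h): if RPCRDMA_POLLSIZE
 * were 16 and RPCRDMA_WC_BUDGET were 256, each upcall would harvest
 * completions in batches of 16 and give up after 256 / 16 = 16 full
 * batches, so a single upcall never processes more than the budget's
 * worth of completions before returning and re-arming the CQ.
 */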

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC: %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * This handler is reentrant, but it processes events one at a time
 * so that receives are handled in arrival order, which preserves the
 * server's credit accounting.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}
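
/*
 * A note on the ep->rep_connected values used throughout this file,
 * summarized here for convenience (derived from the code rather than
 * from separate documentation):
 *
 *   1          transport is connected
 *   0          idle, or a connect attempt is in progress
 *   negative   errno describing why the connection failed or was lost
 */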

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
		  struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC: %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int rc;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_device, devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if (((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
		      (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			rc = -EINVAL;	/* without this, out3 would return 0 */
			goto out3;
		}
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		break;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC: %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	struct ib_cq_init_attr cq_attr = {};
	int rc, err;

	if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
		dprintk("RPC: %s: insufficient sge's available\n",
			__func__);
		return -ENOMEM;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
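
	/* Worked example of the trigger above, using hypothetical numbers
	 * (the real limits come from the provider and xprt_rdma.h): with
	 * max_send_wr = 128 and an unsignaled-send cap above 63, rep_cqinit
	 * becomes 128/2 - 1 = 63. rpcrdma_ep_post() then counts down from
	 * rep_cqinit via DECR_CQCOUNT() and sets IB_SEND_SIGNALED only when
	 * the count runs out, so roughly one send in every 64 generates a
	 * completion, which keeps the send CQ from wrapping.
	 */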
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
	sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
	recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	if (ia->ri_dma_mr)
		ib_dereg_mr(ia->ri_dma_mr);
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	if (ia->ri_dma_mr) {
		rc = ib_dereg_mr(ia->ri_dma_mr);
		dprintk("RPC: %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC: %s: can't reconnect on "
			       "different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition for which a
	 * best-effort connection attempt has already been made.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use the same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
		     ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC: %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_rxprt = r_xprt;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	char *p;
	size_t len;
	int i, rc;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);

	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 * Send/recv buffers in req/rep need to be registered
	 */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));

	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
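
	/* Layout of the single rb_pool allocation after the pointer
	 * arithmetic above (illustrative sketch, N == rb_max_requests):
	 *
	 *   rb_send_bufs[0..N-1]   N pointers to struct rpcrdma_req
	 *   rb_recv_bufs[0..N-1]   N pointers to struct rpcrdma_rep
	 *
	 * The req and rep structures themselves are allocated separately
	 * in the loop below and their addresses stored in these arrays.
	 */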

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		buf->rb_send_bufs[i] = req;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		buf->rb_recv_bufs[i] = rep;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	if (!rep)
		return;

	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	if (!req)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	int i;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  MWs
	 */
	dprintk("RPC: %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs)
			rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
		if (buf->rb_send_bufs)
			rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
	}

	ia->ri_ops->ro_destroy(buf);

	kfree(buf->rb_pool);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		pr_err("RPC: %s: no MWs available\n", __func__);
	return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply = NULL;
	}
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
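
/*
 * Illustrative walk-through of the rule above (hypothetical state with
 * rb_max_requests = 4): initially rb_send_index == rb_recv_index == 0
 * and all four slots are populated. rpcrdma_buffer_get() hands out
 * rb_send_bufs[0], NULLs that slot, and bumps rb_send_index to 1;
 * rpcrdma_buffer_put() later decrements the index back to 0 *before*
 * storing the req, restoring the invariant that the index always
 * names the next non-NULL slot.
 */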
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);

	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC: %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC: %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	struct ib_sge *iov;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	iov = &rb->rg_iov;
	iov->addr = ib_dma_map_single(ia->ri_device,
				      (void *)rb->rg_base, size,
				      DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
		goto out_free;

	iov->length = size;
	iov->lkey = ia->ri_pd->local_dma_lkey;
	rb->rg_size = size;
	rb->rg_owner = NULL;
	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(-ENOMEM);
}
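
/*
 * Sketch of the regbuf memory layout implied by the allocation above
 * (informal, for orientation only):
 *
 *   +---------------------------+------------------------------+
 *   | struct rpcrdma_regbuf     | rg_base[0 .. size-1]         |
 *   | (private header: rg_iov,  | DMA-mapped SEND/RECV payload |
 *   |  rg_size, rg_owner, ...)  | area handed back to callers  |
 *   +---------------------------+------------------------------+
 *
 * Only the payload area is DMA-mapped; the header lives in the same
 * kmalloc allocation, so a single kfree() releases both.
 */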

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_sge *iov;

	if (!rb)
		return;

	iov = &rb->rg_iov;
	ib_dma_unmap_single(ia->ri_device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * The receive buffer is posted first so that a buffer is already
 * available when the reply arrives. It is donated to hardware and
 * reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	struct ib_sge *iov = req->rl_send_iov;
	int i, rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
	send_wr.sg_list = iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;

	for (i = 0; i < send_wr.num_sge; i++)
		ib_dma_sync_single_for_device(device, iov[i].addr,
					      iov[i].length, DMA_TO_DEVICE);
	dprintk("RPC: %s: posting %d s/g entries\n",
		__func__, send_wr.num_sge);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
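/*
 * Worked example with hypothetical numbers (the actual values come from
 * the mount parameters and xprt_rdma.h): if both inline thresholds were
 * 1024 bytes, RPCRDMA_HDRLEN_MIN were 28, and struct rpcrdma_segment
 * occupied 16 bytes, then bytes = 1024 - 28 = 996, 996 / 16 = 62
 * segments would fit, and rounding down to a power of two gives
 * 1 << (fls(62) - 1) = 32 segments.
 */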
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC: %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC: %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}