// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
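
/* Sketch of the typical per-MR lifecycle implied by the comments above
 * and the functions below (an illustrative summary, not a normative
 * specification):
 *
 *   frwr_map()          FAST_REG WR posted unsignaled; fr_state = VALID
 *   (RPC Call sent)     the server performs RDMA READ/WRITE via the MR
 *   frwr_unmap_sync()   fr_state set back to INVALID, LOCAL_INV WR
 *                       posted; only the last WR in the chain is signaled
 *   completion          the MR is DMA-unmapped and returned to rb_mrs
 */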

/* Transport recovery
 *
 * frwr_map and the transport connect worker cannot run at the same
 * time, but frwr_unmap_sync can fire while the transport connect worker
 * is running. Thus MR recovery is handled in frwr_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * frwr_unmap_sync could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
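
/* Note: in the code below, frwr_map handles each of these states by
 * simply recycling any MR it finds in a non-INVALID state: the MR is
 * handed to rpcrdma_mr_recycle(), whose worker DMA-unmaps and releases
 * it, and a replacement MR is allocated on demand later (see
 * frwr_mr_recycle_worker).
 */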

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @ia: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_release_mr(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->frwr.fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 * @cdata: transport parameters
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	      struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
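
	/* Worked example with illustrative (not mandated) values: if the
	 * device supports a FRWR depth of 16 and RPCRDMA_MAX_DATA_SEGS is
	 * 64, delta starts at 48 and the loop above runs three times
	 * (48 -> 32 -> 16 -> 0), adding two WRs per pass, so depth
	 * becomes 7 + 6 = 13 WRs per RPC.
	 */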

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		cdata->max_requests = max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
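
	/* Continuing the illustration above: with depth = 13, a requested
	 * max_requests of 128 would need 128 * 13 = 1664 send WRs. If the
	 * adjusted max_qp_wr were only 1000, max_requests would be scaled
	 * back to 1000 / 13 = 76, giving max_send_wr = 76 * 13 = 988 plus
	 * the backchannel and drain entries added above. The device limit
	 * here is illustrative only.
	 */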

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}
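
/* With the same illustrative values as above (RPCRDMA_MAX_DATA_SEGS = 64,
 * ri_max_frwr_depth = 16, and assuming RPCRDMA_MAX_HDR_SEGS does not cap
 * ri_max_segs), frwr_open sets ri_max_segs to 64/16 + 2 = 6, so
 * frwr_maxpages returns min(64, (6 - 2) * 16) = 64 pages.
 */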

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @out: initialized MR
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, the prepared MR is planted in @out.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, u32 xid,
				struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

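	/* The RPC XID is placed in the upper 32 bits of the region's
	 * offset. This is not required for correctness; it is most
	 * likely a tracing aid, so that segments observed on the wire
	 * can be matched to the RPC that produced them.
	 */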
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	*out = mr;
	return seg;

out_dmamap_err:
	frwr->fr_state = FRWR_IS_INVALID;
	trace_xprtrdma_frwr_sgerr(mr, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}
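
	/* The loop above builds the chain back to front: post_wr now
	 * points at the REG_MR WR for the last MR on rl_registered, and
	 * the Send WR sits at the tail. For two registered MRs, the chain
	 * posted below looks like:
	 *
	 *	REG_MR(MR2) -> REG_MR(MR1) -> Send
	 *
	 * Send Queue ordering guarantees every registration executes
	 * before the Send that carries their handles.
	 */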

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport
 * @mrs: list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
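
	/* At this point, for MRs A, B, C on @mrs, the chain rooted at
	 * "first" is LOCAL_INV(A) -> LOCAL_INV(B) -> LOCAL_INV(C), all
	 * unsignaled so far; the final WR is signaled just below so a
	 * single completion can stand in for the whole chain.
	 */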
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}
631 }