// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
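
/* (The occasional signaling mentioned above appears in this file as
 * the rpcrdma_set_signaled() and rpcrdma_init_cqcount() calls below,
 * which meter how many unsignaled WRs may be posted before the next
 * signaled one.)
 */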

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not by one whose
 * ->op_unmap could fire while the transport is reconnecting.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters a FLUSHED or VALID MR, it is recovered
 * with ib_dereg_mr and then re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
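
/* The four states above correspond to the rpcrdma_frmr_state values
 * used in this file: FRMR_IS_INVALID, FRMR_IS_VALID, FRMR_FLUSHED_FR,
 * and FRMR_FLUSHED_LI (the enum itself is expected to be declared in
 * xprt_rdma.h).
 */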

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	enum rpcrdma_frmr_state state = mw->frmr.fr_state;
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	if (state != FRMR_FLUSHED_LI)
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
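
	/* Worked example, with hypothetical values: if
	 * RPCRDMA_MAX_DATA_SEGS were 64 and the device depth 24,
	 * delta would start at 40 and the loop would run twice
	 * (40 -> 16 -> -8), adding two reg/invalidate pairs:
	 * depth = 7 + 2 * 2 = 11.
	 */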

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frmr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n;
	u8 key;

	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return ERR_PTR(-ENOBUFS);
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
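
	/* Build the scatterlist. Unless the device can register
	 * non-contiguous ranges (SG_GAPS), stop accumulating segments
	 * at the first one that does not end on a page boundary or
	 * that is followed by one not starting on a page boundary.
	 */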
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_dir = rpcrdma_data_dir(writing);

	mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
	if (!mw->mw_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
		__func__, frmr, mw->mw_nents, mr->length);
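
	/* Rotate the 8-bit key portion of the rkey: in IB verbs the low
	 * octet of an rkey is an application-owned key, bumped here so
	 * that a stale rkey from a prior use of this MR cannot match.
	 */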
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mw->mw_sg, i);
	frmr->fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-EIO);

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-ENOTCONN);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mw, mws, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;

		f = &mw->frmr;
		dprintk("RPC: %s: invalidating frmr %p\n",
			__func__, f);

		f->fr_cqe.done = frwr_wc_localinv;
		last = &f->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &f->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mw->mw_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
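
	/* If no LOCAL_INV WRs were built (presumably each MR on the
	 * list was already invalidated remotely, as flagged by
	 * RPCRDMA_MW_F_RI), there is nothing to post or wait for.
	 */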
	if (!f)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);

	/* Initialize CQ count, since there is always a signaled
	 * WR being posted here. The new cqcount depends on how
	 * many SQEs are about to be consumed.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&f->fr_linv_done);
	if (rc)
		goto reset_mrs;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MW list.
	 */
unmap:
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC: %s: DMA unmapping frmr %p\n",
			__func__, &mw->frmr);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
	while (bad_wr) {
		f = container_of(bad_wr, struct rpcrdma_frmr,
				 fr_invwr);
		mw = container_of(f, struct rpcrdma_mw, frmr);

		__frwr_reset_mr(ia, mw);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap_safe			= frwr_op_unmap_safe,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};