/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");
/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 0;

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;
static ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};
static ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif
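/*
 * When RPC_DEBUG is enabled, xprt_rdma_init() registers sunrpc_table and
 * the tunables above become sysctl files named after their .procname
 * entries, for example:
 *
 *	echo 128 > /proc/sys/sunrpc/rdma_slot_table_entries
 *
 * (illustrative only; the available names are exactly those listed in
 * xr_tunables_table).
 */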
static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */
static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	/* netid */
	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}
static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	if (!xprt->shutdown) {
		xprt_clear_connected(xprt);

		dprintk("RPC: %s: %sconnect\n", __func__,
			r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
		rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
		if (rc)
			goto out;
	}
	goto out_clear;

out:
	xprt_wake_pending_tasks(xprt, rc);

out_clear:
	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}
/*
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rdma_connect);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}
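/*
 * Only .to_initval and .to_maxval are set below; the remaining fields
 * (to_increment, to_retries, to_exponential) are zeroed by static
 * initialization, giving the flat "60 second timeout, no retries"
 * behavior noted in xprt_setup_rdma().
 */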
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};
/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}
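/*
 * Note the error-path ordering in xprt_setup_rdma() above: the labels
 * unwind in the reverse order of construction (address strings, endpoint,
 * interface adapter, and finally the rpc_xprt allocation itself).
 */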
/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}
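/*
 * Reconnect backoff in xprt_rdma_connect() below: the delay starts at the
 * 5-second floor set in xprt_setup_rdma(), doubles after each attempt, and
 * is clamped to the 5..30 second range, giving roughly 5s, 10s, 20s, 30s,
 * 30s, ... between attempts.
 */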
static void
xprt_rdma_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rdma_connect,
			xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > (30 * HZ))
			xprt->reestablish_timeout = (30 * HZ);
		else if (xprt->reestablish_timeout < (5 * HZ))
			xprt->reestablish_timeout = (5 * HZ);
	} else {
		schedule_delayed_work(&r_xprt->rdma_connect, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rdma_connect);
	}
}
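/*
 * Credit-based flow control: the server grants a number of credits
 * (tracked in rb_credits); scaling xprt->cwnd by rb_cwndscale below lets
 * the generic congestion logic in xprt_reserve_xprt_cong() admit at most
 * that many concurrent requests.
 */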
static int
xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* == RPC_CWNDSCALE @ init, but *after* setup */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC: %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(xprt, task);
}
/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
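/*
 * Concretely, the pointer handed back to the RPC layer is the rl_xdr_buf[]
 * array embedded in a struct rpcrdma_req; xprt_rdma_free() recovers the
 * request later with
 *
 *	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
 *
 * so no separate lookup of outstanding buffers is required.
 */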
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC: %s: too much data (%zd) for inline "
					"(r/w max %d/%d)\n", __func__, size,
					rpcx_to_rdmad(xprt).inline_rsize,
					rpcx_to_rdmad(xprt).inline_wsize);
			size = req->rl_size;
			rpc_exit(task, -EIO);		/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}
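/*
 * The oversized ("hardway") request built above is recognized later by
 * xprt_rdma_free(): rl_iov.length == 0 flags the temporary request, and
 * its rl_buffer field points back at the original pool request instead of
 * at the buffer pool.
 */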
/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
					req->rl_handle, &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}
/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynch completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
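/*
 * Because of (4), a request is posted either in its entirety or not at
 * all; on success the code below resets rq_bytes_sent to zero rather than
 * tracking partial progress the way the stream transports do.
 */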
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC: %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}
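/*
 * The connect_cookie check above suppresses true retransmits on the same
 * connection: resending a request the server may already have seen would
 * upset the credit accounting, so the transport drops the connection
 * instead and lets the RPC layer re-drive the request after a reconnect.
 */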
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}
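/*
 * The resulting "xprt:	rdma ..." line (visible, for example, in
 * /proc/self/mountstats for an NFS/RDMA mount) carries ten generic
 * rpc_xprt counters followed by the ten RDMA-specific counters from
 * rx_stats, in the order listed above.
 */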
/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong,	/* sunrpc/xprt.c */
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,	/* ditto */
	.rpcbind		= rpcb_getport_async,		/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};
static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
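/*
 * Registering this class via xprt_register_transport() in xprt_rdma_init()
 * lets the RPC client create this transport whenever a caller asks for
 * XPRT_TRANSPORT_RDMA; for NFS that request typically comes from mounting
 * with "proto=rdma", at which point xprt_setup_rdma() above is invoked.
 */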
static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}
static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);

	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}
module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);