/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error"
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
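/*
 * Illustrative sketch (not part of the original file): a ULP completion
 * handler would typically use ib_wc_status_msg() when logging failed
 * completions, e.g.:
 *
 *	static void example_send_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		if (wc->status != IB_WC_SUCCESS)
 *			pr_err("example: send failed: %s (%d)\n",
 *			       ib_wc_status_msg(wc->status), wc->status);
 *	}
 *
 * "example_send_done" is a hypothetical callback name used only for
 * illustration.
 */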
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
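/*
 * Illustrative note (not part of the original file): these helpers map the
 * enum ib_rate encoding either to a multiple of the 2.5 Gb/s base rate or
 * to an approximate value in Mb/s, e.g.:
 *
 *	ib_rate_to_mult(IB_RATE_40_GBPS) == 16
 *	ib_rate_to_mbps(IB_RATE_25_GBPS) == 25781	(25.78125 Gb/s lane rate)
 *	mult_to_ib_rate(48)              == IB_RATE_120_GBPS
 */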
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	enum rdma_transport_type lt;

	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
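/*
 * Illustrative sketch (not part of the original file): a kernel ULP normally
 * allocates one PD per device through the ib_alloc_pd() wrapper and releases
 * it with ib_dealloc_pd() only after every QP/CQ/MR under it is gone:
 *
 *	pd = ib_alloc_pd(device, 0);	// no IB_PD_UNSAFE_GLOBAL_RKEY
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... create CQs, QPs and MRs under this PD ...
 *	ib_dealloc_pd(pd);
 */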
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible for synchronously destroying them and
 * for guaranteeing that no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, udata);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		ah->type    = ah_attr->type;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	return _rdma_create_ah(pd, ah_attr, NULL);
}
EXPORT_SYMBOL(rdma_create_ah);
/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves the destination MAC address for an ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *         the provider driver.
 *
 * It returns a valid address handle on success and an ERR_PTR on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	int err;

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err)
			return ERR_PTR(err);
	}

	return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);
362 int ib_get_rdma_header_version(const union rdma_network_hdr
*hdr
)
364 const struct iphdr
*ip4h
= (struct iphdr
*)&hdr
->roce4grh
;
365 struct iphdr ip4h_checked
;
366 const struct ipv6hdr
*ip6h
= (struct ipv6hdr
*)&hdr
->ibgrh
;
368 /* If it's IPv6, the version must be 6, otherwise, the first
369 * 20 bytes (before the IPv4 header) are garbled.
371 if (ip6h
->version
!= 6)
372 return (ip4h
->version
== 4) ? 4 : 0;
373 /* version may be 6 or 4 because the first 20 bytes could be garbled */
375 /* RoCE v2 requires no options, thus header length
382 * We can't write on scattered buffers so we need to copy to
385 memcpy(&ip4h_checked
, ip4h
, sizeof(ip4h_checked
));
386 ip4h_checked
.check
= 0;
387 ip4h_checked
.check
= ip_fast_csum((u8
*)&ip4h_checked
, 5);
388 /* if IPv4 header checksum is OK, believe it */
389 if (ip4h
->check
== ip4h_checked
.check
)
393 EXPORT_SYMBOL(ib_get_rdma_header_version
);
395 static enum rdma_network_type
ib_get_net_type_by_grh(struct ib_device
*device
,
397 const struct ib_grh
*grh
)
401 if (rdma_protocol_ib(device
, port_num
))
402 return RDMA_NETWORK_IB
;
404 grh_version
= ib_get_rdma_header_version((union rdma_network_hdr
*)grh
);
406 if (grh_version
== 4)
407 return RDMA_NETWORK_IPV4
;
409 if (grh
->next_hdr
== IPPROTO_UDP
)
410 return RDMA_NETWORK_IPV6
;
412 return RDMA_NETWORK_ROCE_V1
;
415 struct find_gid_index_context
{
417 enum ib_gid_type gid_type
;
420 static bool find_gid_index(const union ib_gid
*gid
,
421 const struct ib_gid_attr
*gid_attr
,
424 struct find_gid_index_context
*ctx
=
425 (struct find_gid_index_context
*)context
;
427 if (ctx
->gid_type
!= gid_attr
->gid_type
)
430 if ((!!(ctx
->vlan_id
!= 0xffff) == !is_vlan_dev(gid_attr
->ndev
)) ||
431 (is_vlan_dev(gid_attr
->ndev
) &&
432 vlan_dev_vlan_id(gid_attr
->ndev
) != ctx
->vlan_id
))
438 static int get_sgid_index_from_eth(struct ib_device
*device
, u8 port_num
,
439 u16 vlan_id
, const union ib_gid
*sgid
,
440 enum ib_gid_type gid_type
,
443 struct find_gid_index_context context
= {.vlan_id
= vlan_id
,
444 .gid_type
= gid_type
};
446 return ib_find_gid_by_filter(device
, sgid
, port_num
, find_gid_index
,
447 &context
, gid_index
);
450 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr
*hdr
,
451 enum rdma_network_type net_type
,
452 union ib_gid
*sgid
, union ib_gid
*dgid
)
454 struct sockaddr_in src_in
;
455 struct sockaddr_in dst_in
;
456 __be32 src_saddr
, dst_saddr
;
461 if (net_type
== RDMA_NETWORK_IPV4
) {
462 memcpy(&src_in
.sin_addr
.s_addr
,
463 &hdr
->roce4grh
.saddr
, 4);
464 memcpy(&dst_in
.sin_addr
.s_addr
,
465 &hdr
->roce4grh
.daddr
, 4);
466 src_saddr
= src_in
.sin_addr
.s_addr
;
467 dst_saddr
= dst_in
.sin_addr
.s_addr
;
468 ipv6_addr_set_v4mapped(src_saddr
,
469 (struct in6_addr
*)sgid
);
470 ipv6_addr_set_v4mapped(dst_saddr
,
471 (struct in6_addr
*)dgid
);
473 } else if (net_type
== RDMA_NETWORK_IPV6
||
474 net_type
== RDMA_NETWORK_IB
) {
475 *dgid
= hdr
->ibgrh
.dgid
;
476 *sgid
= hdr
->ibgrh
.sgid
;
482 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr
);
/*
 * This function creates an ah from the incoming packet.
 * The incoming packet carries the dgid of the receiving node (the node on
 * which this code is executing), while its sgid holds the GID of the sender.
 *
 * When resolving the destination MAC address, the received dgid is therefore
 * used as the sgid, and the received sgid as the dgid, since the sgid names
 * the destination that the response must be sent to.
 *
 * This is why, in the call to rdma_addr_find_l2_eth_by_grh() below, the
 * positions of the dgid and sgid arguments do not match the order of the
 * parameters.
 */
497 int ib_init_ah_from_wc(struct ib_device
*device
, u8 port_num
,
498 const struct ib_wc
*wc
, const struct ib_grh
*grh
,
499 struct rdma_ah_attr
*ah_attr
)
504 enum rdma_network_type net_type
= RDMA_NETWORK_IB
;
505 enum ib_gid_type gid_type
= IB_GID_TYPE_IB
;
512 memset(ah_attr
, 0, sizeof *ah_attr
);
513 ah_attr
->type
= rdma_ah_find_type(device
, port_num
);
514 if (rdma_cap_eth_ah(device
, port_num
)) {
515 if (wc
->wc_flags
& IB_WC_WITH_NETWORK_HDR_TYPE
)
516 net_type
= wc
->network_hdr_type
;
518 net_type
= ib_get_net_type_by_grh(device
, port_num
, grh
);
519 gid_type
= ib_network_to_gid_type(net_type
);
521 ret
= ib_get_gids_from_rdma_hdr((union rdma_network_hdr
*)grh
, net_type
,
526 if (rdma_protocol_roce(device
, port_num
)) {
528 u16 vlan_id
= wc
->wc_flags
& IB_WC_WITH_VLAN
?
529 wc
->vlan_id
: 0xffff;
530 struct net_device
*idev
;
531 struct net_device
*resolved_dev
;
533 if (!(wc
->wc_flags
& IB_WC_GRH
))
536 if (!device
->get_netdev
)
539 idev
= device
->get_netdev(device
, port_num
);
543 ret
= rdma_addr_find_l2_eth_by_grh(&dgid
, &sgid
,
545 wc
->wc_flags
& IB_WC_WITH_VLAN
?
547 &if_index
, &hoplimit
);
553 resolved_dev
= dev_get_by_index(&init_net
, if_index
);
555 if (resolved_dev
!= idev
&& !rdma_is_upper_dev_rcu(idev
,
560 dev_put(resolved_dev
);
564 ret
= get_sgid_index_from_eth(device
, port_num
, vlan_id
,
565 &dgid
, gid_type
, &gid_index
);
570 rdma_ah_set_dlid(ah_attr
, wc
->slid
);
571 rdma_ah_set_sl(ah_attr
, wc
->sl
);
572 rdma_ah_set_path_bits(ah_attr
, wc
->dlid_path_bits
);
573 rdma_ah_set_port_num(ah_attr
, port_num
);
575 if (wc
->wc_flags
& IB_WC_GRH
) {
576 if (!rdma_cap_eth_ah(device
, port_num
)) {
577 if (dgid
.global
.interface_id
!= cpu_to_be64(IB_SA_WELL_KNOWN_GUID
)) {
578 ret
= ib_find_cached_gid_by_port(device
, &dgid
,
589 flow_class
= be32_to_cpu(grh
->version_tclass_flow
);
590 rdma_ah_set_grh(ah_attr
, &sgid
,
591 flow_class
& 0xFFFFF,
592 (u8
)gid_index
, hoplimit
,
593 (flow_class
>> 20) & 0xFF);
598 EXPORT_SYMBOL(ib_init_ah_from_wc
);
600 struct ib_ah
*ib_create_ah_from_wc(struct ib_pd
*pd
, const struct ib_wc
*wc
,
601 const struct ib_grh
*grh
, u8 port_num
)
603 struct rdma_ah_attr ah_attr
;
606 ret
= ib_init_ah_from_wc(pd
->device
, port_num
, wc
, grh
, &ah_attr
);
610 return rdma_create_ah(pd
, &ah_attr
);
612 EXPORT_SYMBOL(ib_create_ah_from_wc
);
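/*
 * Illustrative sketch (not part of the original file): a UD service that
 * wants to reply to the sender of a received datagram can build the reply
 * AH directly from the work completion and the received GRH, e.g.:
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	// ... post a UD send using ah, wc->src_qp and the peer's qkey ...
 *	rdma_destroy_ah(ah);
 */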
614 int rdma_modify_ah(struct ib_ah
*ah
, struct rdma_ah_attr
*ah_attr
)
616 if (ah
->type
!= ah_attr
->type
)
619 return ah
->device
->modify_ah
?
620 ah
->device
->modify_ah(ah
, ah_attr
) :
623 EXPORT_SYMBOL(rdma_modify_ah
);
625 int rdma_query_ah(struct ib_ah
*ah
, struct rdma_ah_attr
*ah_attr
)
627 return ah
->device
->query_ah
?
628 ah
->device
->query_ah(ah
, ah_attr
) :
631 EXPORT_SYMBOL(rdma_query_ah
);
633 int rdma_destroy_ah(struct ib_ah
*ah
)
639 ret
= ah
->device
->destroy_ah(ah
);
641 atomic_dec(&pd
->usecnt
);
645 EXPORT_SYMBOL(rdma_destroy_ah
);
647 /* Shared receive queues */
649 struct ib_srq
*ib_create_srq(struct ib_pd
*pd
,
650 struct ib_srq_init_attr
*srq_init_attr
)
654 if (!pd
->device
->create_srq
)
655 return ERR_PTR(-ENOSYS
);
657 srq
= pd
->device
->create_srq(pd
, srq_init_attr
, NULL
);
660 srq
->device
= pd
->device
;
663 srq
->event_handler
= srq_init_attr
->event_handler
;
664 srq
->srq_context
= srq_init_attr
->srq_context
;
665 srq
->srq_type
= srq_init_attr
->srq_type
;
666 if (ib_srq_has_cq(srq
->srq_type
)) {
667 srq
->ext
.cq
= srq_init_attr
->ext
.cq
;
668 atomic_inc(&srq
->ext
.cq
->usecnt
);
670 if (srq
->srq_type
== IB_SRQT_XRC
) {
671 srq
->ext
.xrc
.xrcd
= srq_init_attr
->ext
.xrc
.xrcd
;
672 atomic_inc(&srq
->ext
.xrc
.xrcd
->usecnt
);
674 atomic_inc(&pd
->usecnt
);
675 atomic_set(&srq
->usecnt
, 0);
680 EXPORT_SYMBOL(ib_create_srq
);
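/*
 * Illustrative sketch (not part of the original file): a basic (non-XRC)
 * SRQ is created from a PD with only its size limits filled in, e.g.:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr	  = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *
 * The max_wr/max_sge values above are arbitrary example numbers.
 */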
682 int ib_modify_srq(struct ib_srq
*srq
,
683 struct ib_srq_attr
*srq_attr
,
684 enum ib_srq_attr_mask srq_attr_mask
)
686 return srq
->device
->modify_srq
?
687 srq
->device
->modify_srq(srq
, srq_attr
, srq_attr_mask
, NULL
) :
690 EXPORT_SYMBOL(ib_modify_srq
);
692 int ib_query_srq(struct ib_srq
*srq
,
693 struct ib_srq_attr
*srq_attr
)
695 return srq
->device
->query_srq
?
696 srq
->device
->query_srq(srq
, srq_attr
) : -ENOSYS
;
698 EXPORT_SYMBOL(ib_query_srq
);
700 int ib_destroy_srq(struct ib_srq
*srq
)
703 enum ib_srq_type srq_type
;
704 struct ib_xrcd
*uninitialized_var(xrcd
);
705 struct ib_cq
*uninitialized_var(cq
);
708 if (atomic_read(&srq
->usecnt
))
712 srq_type
= srq
->srq_type
;
713 if (ib_srq_has_cq(srq_type
))
715 if (srq_type
== IB_SRQT_XRC
)
716 xrcd
= srq
->ext
.xrc
.xrcd
;
718 ret
= srq
->device
->destroy_srq(srq
);
720 atomic_dec(&pd
->usecnt
);
721 if (srq_type
== IB_SRQT_XRC
)
722 atomic_dec(&xrcd
->usecnt
);
723 if (ib_srq_has_cq(srq_type
))
724 atomic_dec(&cq
->usecnt
);
729 EXPORT_SYMBOL(ib_destroy_srq
);
733 static void __ib_shared_qp_event_handler(struct ib_event
*event
, void *context
)
735 struct ib_qp
*qp
= context
;
738 spin_lock_irqsave(&qp
->device
->event_handler_lock
, flags
);
739 list_for_each_entry(event
->element
.qp
, &qp
->open_list
, open_list
)
740 if (event
->element
.qp
->event_handler
)
741 event
->element
.qp
->event_handler(event
, event
->element
.qp
->qp_context
);
742 spin_unlock_irqrestore(&qp
->device
->event_handler_lock
, flags
);
745 static void __ib_insert_xrcd_qp(struct ib_xrcd
*xrcd
, struct ib_qp
*qp
)
747 mutex_lock(&xrcd
->tgt_qp_mutex
);
748 list_add(&qp
->xrcd_list
, &xrcd
->tgt_qp_list
);
749 mutex_unlock(&xrcd
->tgt_qp_mutex
);
752 static struct ib_qp
*__ib_open_qp(struct ib_qp
*real_qp
,
753 void (*event_handler
)(struct ib_event
*, void *),
760 qp
= kzalloc(sizeof *qp
, GFP_KERNEL
);
762 return ERR_PTR(-ENOMEM
);
764 qp
->real_qp
= real_qp
;
765 err
= ib_open_shared_qp_security(qp
, real_qp
->device
);
771 qp
->real_qp
= real_qp
;
772 atomic_inc(&real_qp
->usecnt
);
773 qp
->device
= real_qp
->device
;
774 qp
->event_handler
= event_handler
;
775 qp
->qp_context
= qp_context
;
776 qp
->qp_num
= real_qp
->qp_num
;
777 qp
->qp_type
= real_qp
->qp_type
;
779 spin_lock_irqsave(&real_qp
->device
->event_handler_lock
, flags
);
780 list_add(&qp
->open_list
, &real_qp
->open_list
);
781 spin_unlock_irqrestore(&real_qp
->device
->event_handler_lock
, flags
);
786 struct ib_qp
*ib_open_qp(struct ib_xrcd
*xrcd
,
787 struct ib_qp_open_attr
*qp_open_attr
)
789 struct ib_qp
*qp
, *real_qp
;
791 if (qp_open_attr
->qp_type
!= IB_QPT_XRC_TGT
)
792 return ERR_PTR(-EINVAL
);
794 qp
= ERR_PTR(-EINVAL
);
795 mutex_lock(&xrcd
->tgt_qp_mutex
);
796 list_for_each_entry(real_qp
, &xrcd
->tgt_qp_list
, xrcd_list
) {
797 if (real_qp
->qp_num
== qp_open_attr
->qp_num
) {
798 qp
= __ib_open_qp(real_qp
, qp_open_attr
->event_handler
,
799 qp_open_attr
->qp_context
);
803 mutex_unlock(&xrcd
->tgt_qp_mutex
);
806 EXPORT_SYMBOL(ib_open_qp
);
808 static struct ib_qp
*ib_create_xrc_qp(struct ib_qp
*qp
,
809 struct ib_qp_init_attr
*qp_init_attr
)
811 struct ib_qp
*real_qp
= qp
;
813 qp
->event_handler
= __ib_shared_qp_event_handler
;
816 qp
->send_cq
= qp
->recv_cq
= NULL
;
818 qp
->xrcd
= qp_init_attr
->xrcd
;
819 atomic_inc(&qp_init_attr
->xrcd
->usecnt
);
820 INIT_LIST_HEAD(&qp
->open_list
);
822 qp
= __ib_open_qp(real_qp
, qp_init_attr
->event_handler
,
823 qp_init_attr
->qp_context
);
825 __ib_insert_xrcd_qp(qp_init_attr
->xrcd
, real_qp
);
827 real_qp
->device
->destroy_qp(real_qp
);
831 struct ib_qp
*ib_create_qp(struct ib_pd
*pd
,
832 struct ib_qp_init_attr
*qp_init_attr
)
834 struct ib_device
*device
= pd
? pd
->device
: qp_init_attr
->xrcd
->device
;
838 if (qp_init_attr
->rwq_ind_tbl
&&
839 (qp_init_attr
->recv_cq
||
840 qp_init_attr
->srq
|| qp_init_attr
->cap
.max_recv_wr
||
841 qp_init_attr
->cap
.max_recv_sge
))
842 return ERR_PTR(-EINVAL
);
	/*
	 * If the caller is using the RDMA API, calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
850 if (qp_init_attr
->cap
.max_rdma_ctxs
)
851 rdma_rw_init_qp(device
, qp_init_attr
);
853 qp
= device
->create_qp(pd
, qp_init_attr
, NULL
);
857 ret
= ib_create_qp_security(qp
, device
);
866 qp
->qp_type
= qp_init_attr
->qp_type
;
867 qp
->rwq_ind_tbl
= qp_init_attr
->rwq_ind_tbl
;
869 atomic_set(&qp
->usecnt
, 0);
871 spin_lock_init(&qp
->mr_lock
);
872 INIT_LIST_HEAD(&qp
->rdma_mrs
);
873 INIT_LIST_HEAD(&qp
->sig_mrs
);
876 if (qp_init_attr
->qp_type
== IB_QPT_XRC_TGT
)
877 return ib_create_xrc_qp(qp
, qp_init_attr
);
879 qp
->event_handler
= qp_init_attr
->event_handler
;
880 qp
->qp_context
= qp_init_attr
->qp_context
;
881 if (qp_init_attr
->qp_type
== IB_QPT_XRC_INI
) {
885 qp
->recv_cq
= qp_init_attr
->recv_cq
;
886 if (qp_init_attr
->recv_cq
)
887 atomic_inc(&qp_init_attr
->recv_cq
->usecnt
);
888 qp
->srq
= qp_init_attr
->srq
;
890 atomic_inc(&qp_init_attr
->srq
->usecnt
);
894 qp
->send_cq
= qp_init_attr
->send_cq
;
897 atomic_inc(&pd
->usecnt
);
898 if (qp_init_attr
->send_cq
)
899 atomic_inc(&qp_init_attr
->send_cq
->usecnt
);
900 if (qp_init_attr
->rwq_ind_tbl
)
901 atomic_inc(&qp
->rwq_ind_tbl
->usecnt
);
903 if (qp_init_attr
->cap
.max_rdma_ctxs
) {
904 ret
= rdma_rw_init_mrs(qp
, qp_init_attr
);
906 pr_err("failed to init MR pool ret= %d\n", ret
);
913 * Note: all hw drivers guarantee that max_send_sge is lower than
914 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
915 * max_send_sge <= max_sge_rd.
917 qp
->max_write_sge
= qp_init_attr
->cap
.max_send_sge
;
918 qp
->max_read_sge
= min_t(u32
, qp_init_attr
->cap
.max_send_sge
,
919 device
->attrs
.max_sge_rd
);
923 EXPORT_SYMBOL(ib_create_qp
);
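/*
 * Illustrative sketch (not part of the original file): an RC QP for a kernel
 * ULP is typically created with its CQs and capability limits filled into
 * struct ib_qp_init_attr, e.g.:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type     = IB_QPT_RC,
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *
 * The capability numbers above are arbitrary example values.
 */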
925 static const struct {
927 enum ib_qp_attr_mask req_param
[IB_QPT_MAX
];
928 enum ib_qp_attr_mask opt_param
[IB_QPT_MAX
];
929 } qp_state_table
[IB_QPS_ERR
+ 1][IB_QPS_ERR
+ 1] = {
931 [IB_QPS_RESET
] = { .valid
= 1 },
935 [IB_QPT_UD
] = (IB_QP_PKEY_INDEX
|
938 [IB_QPT_RAW_PACKET
] = IB_QP_PORT
,
939 [IB_QPT_UC
] = (IB_QP_PKEY_INDEX
|
942 [IB_QPT_RC
] = (IB_QP_PKEY_INDEX
|
945 [IB_QPT_XRC_INI
] = (IB_QP_PKEY_INDEX
|
948 [IB_QPT_XRC_TGT
] = (IB_QP_PKEY_INDEX
|
951 [IB_QPT_SMI
] = (IB_QP_PKEY_INDEX
|
953 [IB_QPT_GSI
] = (IB_QP_PKEY_INDEX
|
959 [IB_QPS_RESET
] = { .valid
= 1 },
960 [IB_QPS_ERR
] = { .valid
= 1 },
964 [IB_QPT_UD
] = (IB_QP_PKEY_INDEX
|
967 [IB_QPT_UC
] = (IB_QP_PKEY_INDEX
|
970 [IB_QPT_RC
] = (IB_QP_PKEY_INDEX
|
973 [IB_QPT_XRC_INI
] = (IB_QP_PKEY_INDEX
|
976 [IB_QPT_XRC_TGT
] = (IB_QP_PKEY_INDEX
|
979 [IB_QPT_SMI
] = (IB_QP_PKEY_INDEX
|
981 [IB_QPT_GSI
] = (IB_QP_PKEY_INDEX
|
988 [IB_QPT_UC
] = (IB_QP_AV
|
992 [IB_QPT_RC
] = (IB_QP_AV
|
996 IB_QP_MAX_DEST_RD_ATOMIC
|
997 IB_QP_MIN_RNR_TIMER
),
998 [IB_QPT_XRC_INI
] = (IB_QP_AV
|
1002 [IB_QPT_XRC_TGT
] = (IB_QP_AV
|
1006 IB_QP_MAX_DEST_RD_ATOMIC
|
1007 IB_QP_MIN_RNR_TIMER
),
1010 [IB_QPT_UD
] = (IB_QP_PKEY_INDEX
|
1012 [IB_QPT_UC
] = (IB_QP_ALT_PATH
|
1013 IB_QP_ACCESS_FLAGS
|
1015 [IB_QPT_RC
] = (IB_QP_ALT_PATH
|
1016 IB_QP_ACCESS_FLAGS
|
1018 [IB_QPT_XRC_INI
] = (IB_QP_ALT_PATH
|
1019 IB_QP_ACCESS_FLAGS
|
1021 [IB_QPT_XRC_TGT
] = (IB_QP_ALT_PATH
|
1022 IB_QP_ACCESS_FLAGS
|
1024 [IB_QPT_SMI
] = (IB_QP_PKEY_INDEX
|
1026 [IB_QPT_GSI
] = (IB_QP_PKEY_INDEX
|
1032 [IB_QPS_RESET
] = { .valid
= 1 },
1033 [IB_QPS_ERR
] = { .valid
= 1 },
1037 [IB_QPT_UD
] = IB_QP_SQ_PSN
,
1038 [IB_QPT_UC
] = IB_QP_SQ_PSN
,
1039 [IB_QPT_RC
] = (IB_QP_TIMEOUT
|
1043 IB_QP_MAX_QP_RD_ATOMIC
),
1044 [IB_QPT_XRC_INI
] = (IB_QP_TIMEOUT
|
1048 IB_QP_MAX_QP_RD_ATOMIC
),
1049 [IB_QPT_XRC_TGT
] = (IB_QP_TIMEOUT
|
1051 [IB_QPT_SMI
] = IB_QP_SQ_PSN
,
1052 [IB_QPT_GSI
] = IB_QP_SQ_PSN
,
1055 [IB_QPT_UD
] = (IB_QP_CUR_STATE
|
1057 [IB_QPT_UC
] = (IB_QP_CUR_STATE
|
1059 IB_QP_ACCESS_FLAGS
|
1060 IB_QP_PATH_MIG_STATE
),
1061 [IB_QPT_RC
] = (IB_QP_CUR_STATE
|
1063 IB_QP_ACCESS_FLAGS
|
1064 IB_QP_MIN_RNR_TIMER
|
1065 IB_QP_PATH_MIG_STATE
),
1066 [IB_QPT_XRC_INI
] = (IB_QP_CUR_STATE
|
1068 IB_QP_ACCESS_FLAGS
|
1069 IB_QP_PATH_MIG_STATE
),
1070 [IB_QPT_XRC_TGT
] = (IB_QP_CUR_STATE
|
1072 IB_QP_ACCESS_FLAGS
|
1073 IB_QP_MIN_RNR_TIMER
|
1074 IB_QP_PATH_MIG_STATE
),
1075 [IB_QPT_SMI
] = (IB_QP_CUR_STATE
|
1077 [IB_QPT_GSI
] = (IB_QP_CUR_STATE
|
1079 [IB_QPT_RAW_PACKET
] = IB_QP_RATE_LIMIT
,
1084 [IB_QPS_RESET
] = { .valid
= 1 },
1085 [IB_QPS_ERR
] = { .valid
= 1 },
1089 [IB_QPT_UD
] = (IB_QP_CUR_STATE
|
1091 [IB_QPT_UC
] = (IB_QP_CUR_STATE
|
1092 IB_QP_ACCESS_FLAGS
|
1094 IB_QP_PATH_MIG_STATE
),
1095 [IB_QPT_RC
] = (IB_QP_CUR_STATE
|
1096 IB_QP_ACCESS_FLAGS
|
1098 IB_QP_PATH_MIG_STATE
|
1099 IB_QP_MIN_RNR_TIMER
),
1100 [IB_QPT_XRC_INI
] = (IB_QP_CUR_STATE
|
1101 IB_QP_ACCESS_FLAGS
|
1103 IB_QP_PATH_MIG_STATE
),
1104 [IB_QPT_XRC_TGT
] = (IB_QP_CUR_STATE
|
1105 IB_QP_ACCESS_FLAGS
|
1107 IB_QP_PATH_MIG_STATE
|
1108 IB_QP_MIN_RNR_TIMER
),
1109 [IB_QPT_SMI
] = (IB_QP_CUR_STATE
|
1111 [IB_QPT_GSI
] = (IB_QP_CUR_STATE
|
1113 [IB_QPT_RAW_PACKET
] = IB_QP_RATE_LIMIT
,
1119 [IB_QPT_UD
] = IB_QP_EN_SQD_ASYNC_NOTIFY
,
1120 [IB_QPT_UC
] = IB_QP_EN_SQD_ASYNC_NOTIFY
,
1121 [IB_QPT_RC
] = IB_QP_EN_SQD_ASYNC_NOTIFY
,
1122 [IB_QPT_XRC_INI
] = IB_QP_EN_SQD_ASYNC_NOTIFY
,
1123 [IB_QPT_XRC_TGT
] = IB_QP_EN_SQD_ASYNC_NOTIFY
, /* ??? */
1124 [IB_QPT_SMI
] = IB_QP_EN_SQD_ASYNC_NOTIFY
,
1125 [IB_QPT_GSI
] = IB_QP_EN_SQD_ASYNC_NOTIFY
1130 [IB_QPS_RESET
] = { .valid
= 1 },
1131 [IB_QPS_ERR
] = { .valid
= 1 },
1135 [IB_QPT_UD
] = (IB_QP_CUR_STATE
|
1137 [IB_QPT_UC
] = (IB_QP_CUR_STATE
|
1139 IB_QP_ACCESS_FLAGS
|
1140 IB_QP_PATH_MIG_STATE
),
1141 [IB_QPT_RC
] = (IB_QP_CUR_STATE
|
1143 IB_QP_ACCESS_FLAGS
|
1144 IB_QP_MIN_RNR_TIMER
|
1145 IB_QP_PATH_MIG_STATE
),
1146 [IB_QPT_XRC_INI
] = (IB_QP_CUR_STATE
|
1148 IB_QP_ACCESS_FLAGS
|
1149 IB_QP_PATH_MIG_STATE
),
1150 [IB_QPT_XRC_TGT
] = (IB_QP_CUR_STATE
|
1152 IB_QP_ACCESS_FLAGS
|
1153 IB_QP_MIN_RNR_TIMER
|
1154 IB_QP_PATH_MIG_STATE
),
1155 [IB_QPT_SMI
] = (IB_QP_CUR_STATE
|
1157 [IB_QPT_GSI
] = (IB_QP_CUR_STATE
|
1164 [IB_QPT_UD
] = (IB_QP_PKEY_INDEX
|
1166 [IB_QPT_UC
] = (IB_QP_AV
|
1168 IB_QP_ACCESS_FLAGS
|
1170 IB_QP_PATH_MIG_STATE
),
1171 [IB_QPT_RC
] = (IB_QP_PORT
|
1176 IB_QP_MAX_QP_RD_ATOMIC
|
1177 IB_QP_MAX_DEST_RD_ATOMIC
|
1179 IB_QP_ACCESS_FLAGS
|
1181 IB_QP_MIN_RNR_TIMER
|
1182 IB_QP_PATH_MIG_STATE
),
1183 [IB_QPT_XRC_INI
] = (IB_QP_PORT
|
1188 IB_QP_MAX_QP_RD_ATOMIC
|
1190 IB_QP_ACCESS_FLAGS
|
1192 IB_QP_PATH_MIG_STATE
),
1193 [IB_QPT_XRC_TGT
] = (IB_QP_PORT
|
1196 IB_QP_MAX_DEST_RD_ATOMIC
|
1198 IB_QP_ACCESS_FLAGS
|
1200 IB_QP_MIN_RNR_TIMER
|
1201 IB_QP_PATH_MIG_STATE
),
1202 [IB_QPT_SMI
] = (IB_QP_PKEY_INDEX
|
1204 [IB_QPT_GSI
] = (IB_QP_PKEY_INDEX
|
1210 [IB_QPS_RESET
] = { .valid
= 1 },
1211 [IB_QPS_ERR
] = { .valid
= 1 },
1215 [IB_QPT_UD
] = (IB_QP_CUR_STATE
|
1217 [IB_QPT_UC
] = (IB_QP_CUR_STATE
|
1218 IB_QP_ACCESS_FLAGS
),
1219 [IB_QPT_SMI
] = (IB_QP_CUR_STATE
|
1221 [IB_QPT_GSI
] = (IB_QP_CUR_STATE
|
1227 [IB_QPS_RESET
] = { .valid
= 1 },
1228 [IB_QPS_ERR
] = { .valid
= 1 }
1232 int ib_modify_qp_is_ok(enum ib_qp_state cur_state
, enum ib_qp_state next_state
,
1233 enum ib_qp_type type
, enum ib_qp_attr_mask mask
,
1234 enum rdma_link_layer ll
)
1236 enum ib_qp_attr_mask req_param
, opt_param
;
1238 if (cur_state
< 0 || cur_state
> IB_QPS_ERR
||
1239 next_state
< 0 || next_state
> IB_QPS_ERR
)
1242 if (mask
& IB_QP_CUR_STATE
&&
1243 cur_state
!= IB_QPS_RTR
&& cur_state
!= IB_QPS_RTS
&&
1244 cur_state
!= IB_QPS_SQD
&& cur_state
!= IB_QPS_SQE
)
1247 if (!qp_state_table
[cur_state
][next_state
].valid
)
1250 req_param
= qp_state_table
[cur_state
][next_state
].req_param
[type
];
1251 opt_param
= qp_state_table
[cur_state
][next_state
].opt_param
[type
];
1253 if ((mask
& req_param
) != req_param
)
1256 if (mask
& ~(req_param
| opt_param
| IB_QP_STATE
))
1261 EXPORT_SYMBOL(ib_modify_qp_is_ok
);
1263 static int ib_resolve_eth_dmac(struct ib_device
*device
,
1264 struct rdma_ah_attr
*ah_attr
)
1267 struct ib_global_route
*grh
;
1269 if (!rdma_is_port_valid(device
, rdma_ah_get_port_num(ah_attr
)))
1272 if (ah_attr
->type
!= RDMA_AH_ATTR_TYPE_ROCE
)
1275 grh
= rdma_ah_retrieve_grh(ah_attr
);
1277 if (rdma_link_local_addr((struct in6_addr
*)grh
->dgid
.raw
)) {
1278 rdma_get_ll_mac((struct in6_addr
*)grh
->dgid
.raw
,
1279 ah_attr
->roce
.dmac
);
1282 if (rdma_is_multicast_addr((struct in6_addr
*)ah_attr
->grh
.dgid
.raw
)) {
1283 if (ipv6_addr_v4mapped((struct in6_addr
*)ah_attr
->grh
.dgid
.raw
)) {
1286 memcpy(&addr
, ah_attr
->grh
.dgid
.raw
+ 12, 4);
1287 ip_eth_mc_map(addr
, (char *)ah_attr
->roce
.dmac
);
1289 ipv6_eth_mc_map((struct in6_addr
*)ah_attr
->grh
.dgid
.raw
,
1290 (char *)ah_attr
->roce
.dmac
);
1294 struct ib_gid_attr sgid_attr
;
1298 ret
= ib_query_gid(device
,
1299 rdma_ah_get_port_num(ah_attr
),
1303 if (ret
|| !sgid_attr
.ndev
) {
1309 ifindex
= sgid_attr
.ndev
->ifindex
;
1312 rdma_addr_find_l2_eth_by_grh(&sgid
, &grh
->dgid
,
1314 NULL
, &ifindex
, &hop_limit
);
1316 dev_put(sgid_attr
.ndev
);
1318 grh
->hop_limit
= hop_limit
;
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information.
 *
 * It returns 0 on success and returns the appropriate error code on error.
 */
1335 int ib_modify_qp_with_udata(struct ib_qp
*qp
, struct ib_qp_attr
*attr
,
1336 int attr_mask
, struct ib_udata
*udata
)
1340 if (attr_mask
& IB_QP_AV
) {
1341 ret
= ib_resolve_eth_dmac(qp
->device
, &attr
->ah_attr
);
1345 ret
= ib_security_modify_qp(qp
, attr
, attr_mask
, udata
);
1346 if (!ret
&& (attr_mask
& IB_QP_PORT
))
1347 qp
->port
= attr
->port_num
;
1351 EXPORT_SYMBOL(ib_modify_qp_with_udata
);
1353 int ib_get_eth_speed(struct ib_device
*dev
, u8 port_num
, u8
*speed
, u8
*width
)
1357 struct net_device
*netdev
;
1358 struct ethtool_link_ksettings lksettings
;
1360 if (rdma_port_get_link_layer(dev
, port_num
) != IB_LINK_LAYER_ETHERNET
)
1363 if (!dev
->get_netdev
)
1366 netdev
= dev
->get_netdev(dev
, port_num
);
1371 rc
= __ethtool_get_link_ksettings(netdev
, &lksettings
);
1377 netdev_speed
= lksettings
.base
.speed
;
1379 netdev_speed
= SPEED_1000
;
1380 pr_warn("%s speed is unknown, defaulting to %d\n", netdev
->name
,
1384 if (netdev_speed
<= SPEED_1000
) {
1385 *width
= IB_WIDTH_1X
;
1386 *speed
= IB_SPEED_SDR
;
1387 } else if (netdev_speed
<= SPEED_10000
) {
1388 *width
= IB_WIDTH_1X
;
1389 *speed
= IB_SPEED_FDR10
;
1390 } else if (netdev_speed
<= SPEED_20000
) {
1391 *width
= IB_WIDTH_4X
;
1392 *speed
= IB_SPEED_DDR
;
1393 } else if (netdev_speed
<= SPEED_25000
) {
1394 *width
= IB_WIDTH_1X
;
1395 *speed
= IB_SPEED_EDR
;
1396 } else if (netdev_speed
<= SPEED_40000
) {
1397 *width
= IB_WIDTH_4X
;
1398 *speed
= IB_SPEED_FDR10
;
1400 *width
= IB_WIDTH_4X
;
1401 *speed
= IB_SPEED_EDR
;
1406 EXPORT_SYMBOL(ib_get_eth_speed
);
1408 int ib_modify_qp(struct ib_qp
*qp
,
1409 struct ib_qp_attr
*qp_attr
,
1412 return ib_modify_qp_with_udata(qp
, qp_attr
, qp_attr_mask
, NULL
);
1414 EXPORT_SYMBOL(ib_modify_qp
);
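/*
 * Illustrative sketch (not part of the original file): a newly created RC QP
 * is moved RESET -> INIT with the attribute mask required by qp_state_table
 * above, e.g.:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = port_num,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 *
 * Later transitions to RTR and RTS follow the same pattern with the masks
 * listed in qp_state_table.
 */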
1416 int ib_query_qp(struct ib_qp
*qp
,
1417 struct ib_qp_attr
*qp_attr
,
1419 struct ib_qp_init_attr
*qp_init_attr
)
1421 return qp
->device
->query_qp
?
1422 qp
->device
->query_qp(qp
->real_qp
, qp_attr
, qp_attr_mask
, qp_init_attr
) :
1425 EXPORT_SYMBOL(ib_query_qp
);
1427 int ib_close_qp(struct ib_qp
*qp
)
1429 struct ib_qp
*real_qp
;
1430 unsigned long flags
;
1432 real_qp
= qp
->real_qp
;
1436 spin_lock_irqsave(&real_qp
->device
->event_handler_lock
, flags
);
1437 list_del(&qp
->open_list
);
1438 spin_unlock_irqrestore(&real_qp
->device
->event_handler_lock
, flags
);
1440 atomic_dec(&real_qp
->usecnt
);
1441 ib_close_shared_qp_security(qp
->qp_sec
);
1446 EXPORT_SYMBOL(ib_close_qp
);
1448 static int __ib_destroy_shared_qp(struct ib_qp
*qp
)
1450 struct ib_xrcd
*xrcd
;
1451 struct ib_qp
*real_qp
;
1454 real_qp
= qp
->real_qp
;
1455 xrcd
= real_qp
->xrcd
;
1457 mutex_lock(&xrcd
->tgt_qp_mutex
);
1459 if (atomic_read(&real_qp
->usecnt
) == 0)
1460 list_del(&real_qp
->xrcd_list
);
1463 mutex_unlock(&xrcd
->tgt_qp_mutex
);
1466 ret
= ib_destroy_qp(real_qp
);
1468 atomic_dec(&xrcd
->usecnt
);
1470 __ib_insert_xrcd_qp(xrcd
, real_qp
);
1476 int ib_destroy_qp(struct ib_qp
*qp
)
1479 struct ib_cq
*scq
, *rcq
;
1481 struct ib_rwq_ind_table
*ind_tbl
;
1482 struct ib_qp_security
*sec
;
1485 WARN_ON_ONCE(qp
->mrs_used
> 0);
1487 if (atomic_read(&qp
->usecnt
))
1490 if (qp
->real_qp
!= qp
)
1491 return __ib_destroy_shared_qp(qp
);
1497 ind_tbl
= qp
->rwq_ind_tbl
;
1500 ib_destroy_qp_security_begin(sec
);
1503 rdma_rw_cleanup_mrs(qp
);
1505 ret
= qp
->device
->destroy_qp(qp
);
1508 atomic_dec(&pd
->usecnt
);
1510 atomic_dec(&scq
->usecnt
);
1512 atomic_dec(&rcq
->usecnt
);
1514 atomic_dec(&srq
->usecnt
);
1516 atomic_dec(&ind_tbl
->usecnt
);
1518 ib_destroy_qp_security_end(sec
);
1521 ib_destroy_qp_security_abort(sec
);
1526 EXPORT_SYMBOL(ib_destroy_qp
);
1528 /* Completion queues */
1530 struct ib_cq
*ib_create_cq(struct ib_device
*device
,
1531 ib_comp_handler comp_handler
,
1532 void (*event_handler
)(struct ib_event
*, void *),
1534 const struct ib_cq_init_attr
*cq_attr
)
1538 cq
= device
->create_cq(device
, cq_attr
, NULL
, NULL
);
1541 cq
->device
= device
;
1543 cq
->comp_handler
= comp_handler
;
1544 cq
->event_handler
= event_handler
;
1545 cq
->cq_context
= cq_context
;
1546 atomic_set(&cq
->usecnt
, 0);
1551 EXPORT_SYMBOL(ib_create_cq
);
1553 int rdma_set_cq_moderation(struct ib_cq
*cq
, u16 cq_count
, u16 cq_period
)
1555 return cq
->device
->modify_cq
?
1556 cq
->device
->modify_cq(cq
, cq_count
, cq_period
) : -ENOSYS
;
1558 EXPORT_SYMBOL(rdma_set_cq_moderation
);
1560 int ib_destroy_cq(struct ib_cq
*cq
)
1562 if (atomic_read(&cq
->usecnt
))
1565 return cq
->device
->destroy_cq(cq
);
1567 EXPORT_SYMBOL(ib_destroy_cq
);
1569 int ib_resize_cq(struct ib_cq
*cq
, int cqe
)
1571 return cq
->device
->resize_cq
?
1572 cq
->device
->resize_cq(cq
, cqe
, NULL
) : -ENOSYS
;
1574 EXPORT_SYMBOL(ib_resize_cq
);
1576 /* Memory regions */
1578 int ib_dereg_mr(struct ib_mr
*mr
)
1580 struct ib_pd
*pd
= mr
->pd
;
1583 ret
= mr
->device
->dereg_mr(mr
);
1585 atomic_dec(&pd
->usecnt
);
1589 EXPORT_SYMBOL(ib_dereg_mr
);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
1603 struct ib_mr
*ib_alloc_mr(struct ib_pd
*pd
,
1604 enum ib_mr_type mr_type
,
1609 if (!pd
->device
->alloc_mr
)
1610 return ERR_PTR(-ENOSYS
);
1612 mr
= pd
->device
->alloc_mr(pd
, mr_type
, max_num_sg
);
1614 mr
->device
= pd
->device
;
1617 atomic_inc(&pd
->usecnt
);
1618 mr
->need_inval
= false;
1623 EXPORT_SYMBOL(ib_alloc_mr
);
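/*
 * Illustrative sketch (not part of the original file): a fast-registration
 * MR is usually allocated per QP context with a bounded page-list size, e.g.:
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * where 16 is an arbitrary example for the maximum number of sg entries
 * that will later be passed to ib_map_mr_sg().
 */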
1625 /* "Fast" memory regions */
1627 struct ib_fmr
*ib_alloc_fmr(struct ib_pd
*pd
,
1628 int mr_access_flags
,
1629 struct ib_fmr_attr
*fmr_attr
)
1633 if (!pd
->device
->alloc_fmr
)
1634 return ERR_PTR(-ENOSYS
);
1636 fmr
= pd
->device
->alloc_fmr(pd
, mr_access_flags
, fmr_attr
);
1638 fmr
->device
= pd
->device
;
1640 atomic_inc(&pd
->usecnt
);
1645 EXPORT_SYMBOL(ib_alloc_fmr
);
1647 int ib_unmap_fmr(struct list_head
*fmr_list
)
1651 if (list_empty(fmr_list
))
1654 fmr
= list_entry(fmr_list
->next
, struct ib_fmr
, list
);
1655 return fmr
->device
->unmap_fmr(fmr_list
);
1657 EXPORT_SYMBOL(ib_unmap_fmr
);
1659 int ib_dealloc_fmr(struct ib_fmr
*fmr
)
1665 ret
= fmr
->device
->dealloc_fmr(fmr
);
1667 atomic_dec(&pd
->usecnt
);
1671 EXPORT_SYMBOL(ib_dealloc_fmr
);
1673 /* Multicast groups */
1675 static bool is_valid_mcast_lid(struct ib_qp
*qp
, u16 lid
)
1677 struct ib_qp_init_attr init_attr
= {};
1678 struct ib_qp_attr attr
= {};
1679 int num_eth_ports
= 0;
1682 /* If QP state >= init, it is assigned to a port and we can check this
1685 if (!ib_query_qp(qp
, &attr
, IB_QP_STATE
| IB_QP_PORT
, &init_attr
)) {
1686 if (attr
.qp_state
>= IB_QPS_INIT
) {
1687 if (rdma_port_get_link_layer(qp
->device
, attr
.port_num
) !=
1688 IB_LINK_LAYER_INFINIBAND
)
1694 /* Can't get a quick answer, iterate over all ports */
1695 for (port
= 0; port
< qp
->device
->phys_port_cnt
; port
++)
1696 if (rdma_port_get_link_layer(qp
->device
, port
) !=
1697 IB_LINK_LAYER_INFINIBAND
)
	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
1707 /* If all the ports are IB, we can check according to IB spec. */
1709 return !(lid
< be16_to_cpu(IB_MULTICAST_LID_BASE
) ||
1710 lid
== be16_to_cpu(IB_LID_PERMISSIVE
));
1713 int ib_attach_mcast(struct ib_qp
*qp
, union ib_gid
*gid
, u16 lid
)
1717 if (!qp
->device
->attach_mcast
)
1720 if (!rdma_is_multicast_addr((struct in6_addr
*)gid
->raw
) ||
1721 qp
->qp_type
!= IB_QPT_UD
|| !is_valid_mcast_lid(qp
, lid
))
1724 ret
= qp
->device
->attach_mcast(qp
, gid
, lid
);
1726 atomic_inc(&qp
->usecnt
);
1729 EXPORT_SYMBOL(ib_attach_mcast
);
1731 int ib_detach_mcast(struct ib_qp
*qp
, union ib_gid
*gid
, u16 lid
)
1735 if (!qp
->device
->detach_mcast
)
1738 if (!rdma_is_multicast_addr((struct in6_addr
*)gid
->raw
) ||
1739 qp
->qp_type
!= IB_QPT_UD
|| !is_valid_mcast_lid(qp
, lid
))
1742 ret
= qp
->device
->detach_mcast(qp
, gid
, lid
);
1744 atomic_dec(&qp
->usecnt
);
1747 EXPORT_SYMBOL(ib_detach_mcast
);
1749 struct ib_xrcd
*ib_alloc_xrcd(struct ib_device
*device
)
1751 struct ib_xrcd
*xrcd
;
1753 if (!device
->alloc_xrcd
)
1754 return ERR_PTR(-ENOSYS
);
1756 xrcd
= device
->alloc_xrcd(device
, NULL
, NULL
);
1757 if (!IS_ERR(xrcd
)) {
1758 xrcd
->device
= device
;
1760 atomic_set(&xrcd
->usecnt
, 0);
1761 mutex_init(&xrcd
->tgt_qp_mutex
);
1762 INIT_LIST_HEAD(&xrcd
->tgt_qp_list
);
1767 EXPORT_SYMBOL(ib_alloc_xrcd
);
1769 int ib_dealloc_xrcd(struct ib_xrcd
*xrcd
)
1774 if (atomic_read(&xrcd
->usecnt
))
1777 while (!list_empty(&xrcd
->tgt_qp_list
)) {
1778 qp
= list_entry(xrcd
->tgt_qp_list
.next
, struct ib_qp
, xrcd_list
);
1779 ret
= ib_destroy_qp(qp
);
1784 return xrcd
->device
->dealloc_xrcd(xrcd
);
1786 EXPORT_SYMBOL(ib_dealloc_xrcd
);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
1802 struct ib_wq
*ib_create_wq(struct ib_pd
*pd
,
1803 struct ib_wq_init_attr
*wq_attr
)
1807 if (!pd
->device
->create_wq
)
1808 return ERR_PTR(-ENOSYS
);
1810 wq
= pd
->device
->create_wq(pd
, wq_attr
, NULL
);
1812 wq
->event_handler
= wq_attr
->event_handler
;
1813 wq
->wq_context
= wq_attr
->wq_context
;
1814 wq
->wq_type
= wq_attr
->wq_type
;
1815 wq
->cq
= wq_attr
->cq
;
1816 wq
->device
= pd
->device
;
1819 atomic_inc(&pd
->usecnt
);
1820 atomic_inc(&wq_attr
->cq
->usecnt
);
1821 atomic_set(&wq
->usecnt
, 0);
1825 EXPORT_SYMBOL(ib_create_wq
);
1828 * ib_destroy_wq - Destroys the specified WQ.
1829 * @wq: The WQ to destroy.
1831 int ib_destroy_wq(struct ib_wq
*wq
)
1834 struct ib_cq
*cq
= wq
->cq
;
1835 struct ib_pd
*pd
= wq
->pd
;
1837 if (atomic_read(&wq
->usecnt
))
1840 err
= wq
->device
->destroy_wq(wq
);
1842 atomic_dec(&pd
->usecnt
);
1843 atomic_dec(&cq
->usecnt
);
1847 EXPORT_SYMBOL(ib_destroy_wq
);
1850 * ib_modify_wq - Modifies the specified WQ.
1851 * @wq: The WQ to modify.
1852 * @wq_attr: On input, specifies the WQ attributes to modify.
1853 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
1854 * are being modified.
1855 * On output, the current values of selected WQ attributes are returned.
1857 int ib_modify_wq(struct ib_wq
*wq
, struct ib_wq_attr
*wq_attr
,
1862 if (!wq
->device
->modify_wq
)
1865 err
= wq
->device
->modify_wq(wq
, wq_attr
, wq_attr_mask
, NULL
);
1868 EXPORT_SYMBOL(ib_modify_wq
);
1871 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
1872 * @device: The device on which to create the rwq indirection table.
1873 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
1874 * create the Indirection Table.
 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
 * shorter than that of the created ib_rwq_ind_table object, and the caller
 * is responsible for its memory allocation/free.
 */
1880 struct ib_rwq_ind_table
*ib_create_rwq_ind_table(struct ib_device
*device
,
1881 struct ib_rwq_ind_table_init_attr
*init_attr
)
1883 struct ib_rwq_ind_table
*rwq_ind_table
;
1887 if (!device
->create_rwq_ind_table
)
1888 return ERR_PTR(-ENOSYS
);
1890 table_size
= (1 << init_attr
->log_ind_tbl_size
);
1891 rwq_ind_table
= device
->create_rwq_ind_table(device
,
1893 if (IS_ERR(rwq_ind_table
))
1894 return rwq_ind_table
;
1896 rwq_ind_table
->ind_tbl
= init_attr
->ind_tbl
;
1897 rwq_ind_table
->log_ind_tbl_size
= init_attr
->log_ind_tbl_size
;
1898 rwq_ind_table
->device
= device
;
1899 rwq_ind_table
->uobject
= NULL
;
1900 atomic_set(&rwq_ind_table
->usecnt
, 0);
1902 for (i
= 0; i
< table_size
; i
++)
1903 atomic_inc(&rwq_ind_table
->ind_tbl
[i
]->usecnt
);
1905 return rwq_ind_table
;
1907 EXPORT_SYMBOL(ib_create_rwq_ind_table
);
1910 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
1911 * @wq_ind_table: The Indirection Table to destroy.
1913 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table
*rwq_ind_table
)
1916 u32 table_size
= (1 << rwq_ind_table
->log_ind_tbl_size
);
1917 struct ib_wq
**ind_tbl
= rwq_ind_table
->ind_tbl
;
1919 if (atomic_read(&rwq_ind_table
->usecnt
))
1922 err
= rwq_ind_table
->device
->destroy_rwq_ind_table(rwq_ind_table
);
1924 for (i
= 0; i
< table_size
; i
++)
1925 atomic_dec(&ind_tbl
[i
]->usecnt
);
1930 EXPORT_SYMBOL(ib_destroy_rwq_ind_table
);
1932 struct ib_flow
*ib_create_flow(struct ib_qp
*qp
,
1933 struct ib_flow_attr
*flow_attr
,
1936 struct ib_flow
*flow_id
;
1937 if (!qp
->device
->create_flow
)
1938 return ERR_PTR(-ENOSYS
);
1940 flow_id
= qp
->device
->create_flow(qp
, flow_attr
, domain
);
1941 if (!IS_ERR(flow_id
)) {
1942 atomic_inc(&qp
->usecnt
);
1947 EXPORT_SYMBOL(ib_create_flow
);
1949 int ib_destroy_flow(struct ib_flow
*flow_id
)
1952 struct ib_qp
*qp
= flow_id
->qp
;
1954 err
= qp
->device
->destroy_flow(flow_id
);
1956 atomic_dec(&qp
->usecnt
);
1959 EXPORT_SYMBOL(ib_destroy_flow
);
1961 int ib_check_mr_status(struct ib_mr
*mr
, u32 check_mask
,
1962 struct ib_mr_status
*mr_status
)
1964 return mr
->device
->check_mr_status
?
1965 mr
->device
->check_mr_status(mr
, check_mask
, mr_status
) : -ENOSYS
;
1967 EXPORT_SYMBOL(ib_check_mr_status
);
1969 int ib_set_vf_link_state(struct ib_device
*device
, int vf
, u8 port
,
1972 if (!device
->set_vf_link_state
)
1975 return device
->set_vf_link_state(device
, vf
, port
, state
);
1977 EXPORT_SYMBOL(ib_set_vf_link_state
);
1979 int ib_get_vf_config(struct ib_device
*device
, int vf
, u8 port
,
1980 struct ifla_vf_info
*info
)
1982 if (!device
->get_vf_config
)
1985 return device
->get_vf_config(device
, vf
, port
, info
);
1987 EXPORT_SYMBOL(ib_get_vf_config
);
1989 int ib_get_vf_stats(struct ib_device
*device
, int vf
, u8 port
,
1990 struct ifla_vf_stats
*stats
)
1992 if (!device
->get_vf_stats
)
1995 return device
->get_vf_stats(device
, vf
, port
, stats
);
1997 EXPORT_SYMBOL(ib_get_vf_stats
);
1999 int ib_set_vf_guid(struct ib_device
*device
, int vf
, u8 port
, u64 guid
,
2002 if (!device
->set_vf_guid
)
2005 return device
->set_vf_guid(device
, vf
, port
, guid
, type
);
2007 EXPORT_SYMBOL(ib_set_vf_guid
);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 * and set it as the memory region.
2012 * @mr: memory region
2013 * @sg: dma mapped scatterlist
2014 * @sg_nents: number of entries in sg
2015 * @sg_offset: offset in bytes into sg
2016 * @page_size: page vector desired page size
2019 * - The first sg element is allowed to have an offset.
2020 * - Each sg element must either be aligned to page_size or virtually
2021 * contiguous to the previous element. In case an sg element has a
2022 * non-contiguous offset, the mapping prefix will not include it.
2023 * - The last sg element is allowed to have length less than page_size.
2024 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
2025 * then only max_num_sg entries will be mapped.
2026 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2027 * constraints holds and the page_size argument is ignored.
2029 * Returns the number of sg elements that were mapped to the memory region.
2031 * After this completes successfully, the memory region
2032 * is ready for registration.
2034 int ib_map_mr_sg(struct ib_mr
*mr
, struct scatterlist
*sg
, int sg_nents
,
2035 unsigned int *sg_offset
, unsigned int page_size
)
2037 if (unlikely(!mr
->device
->map_mr_sg
))
2040 mr
->page_size
= page_size
;
2042 return mr
->device
->map_mr_sg(mr
, sg
, sg_nents
, sg_offset
);
2044 EXPORT_SYMBOL(ib_map_mr_sg
);
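/*
 * Illustrative sketch (not part of the original file): the typical
 * fast-registration flow uses ib_map_mr_sg() together with an IB_WR_REG_MR
 * work request (the sg list must already be DMA-mapped):
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode = IB_WR_REG_MR,
 *		.mr	   = mr,
 *		.key	   = mr->rkey,
 *		.access	   = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */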
2047 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2049 * @mr: memory region
2050 * @sgl: dma mapped scatterlist
2051 * @sg_nents: number of entries in sg
2052 * @sg_offset_p: IN: start offset in bytes into sg
2053 * OUT: offset in bytes for element n of the sg of the first
2054 * byte that has not been processed where n is the return
2055 * value of this function.
2056 * @set_page: driver page assignment function pointer
 * Core service helper for drivers to convert the largest
 * prefix of a given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
2066 int ib_sg_to_pages(struct ib_mr
*mr
, struct scatterlist
*sgl
, int sg_nents
,
2067 unsigned int *sg_offset_p
, int (*set_page
)(struct ib_mr
*, u64
))
2069 struct scatterlist
*sg
;
2070 u64 last_end_dma_addr
= 0;
2071 unsigned int sg_offset
= sg_offset_p
? *sg_offset_p
: 0;
2072 unsigned int last_page_off
= 0;
2073 u64 page_mask
= ~((u64
)mr
->page_size
- 1);
2076 if (unlikely(sg_nents
<= 0 || sg_offset
> sg_dma_len(&sgl
[0])))
2079 mr
->iova
= sg_dma_address(&sgl
[0]) + sg_offset
;
2082 for_each_sg(sgl
, sg
, sg_nents
, i
) {
2083 u64 dma_addr
= sg_dma_address(sg
) + sg_offset
;
2084 u64 prev_addr
= dma_addr
;
2085 unsigned int dma_len
= sg_dma_len(sg
) - sg_offset
;
2086 u64 end_dma_addr
= dma_addr
+ dma_len
;
2087 u64 page_addr
= dma_addr
& page_mask
;
2090 * For the second and later elements, check whether either the
2091 * end of element i-1 or the start of element i is not aligned
2092 * on a page boundary.
2094 if (i
&& (last_page_off
!= 0 || page_addr
!= dma_addr
)) {
2095 /* Stop mapping if there is a gap. */
2096 if (last_end_dma_addr
!= dma_addr
)
2100 * Coalesce this element with the last. If it is small
2101 * enough just update mr->length. Otherwise start
2102 * mapping from the next page.
2108 ret
= set_page(mr
, page_addr
);
2109 if (unlikely(ret
< 0)) {
2110 sg_offset
= prev_addr
- sg_dma_address(sg
);
2111 mr
->length
+= prev_addr
- dma_addr
;
2113 *sg_offset_p
= sg_offset
;
2114 return i
|| sg_offset
? i
: ret
;
2116 prev_addr
= page_addr
;
2118 page_addr
+= mr
->page_size
;
2119 } while (page_addr
< end_dma_addr
);
2121 mr
->length
+= dma_len
;
2122 last_end_dma_addr
= end_dma_addr
;
2123 last_page_off
= end_dma_addr
& ~page_mask
;
2132 EXPORT_SYMBOL(ib_sg_to_pages
);
2134 struct ib_drain_cqe
{
2136 struct completion done
;
2139 static void ib_drain_qp_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
2141 struct ib_drain_cqe
*cqe
= container_of(wc
->wr_cqe
, struct ib_drain_cqe
,
2144 complete(&cqe
->done
);
2148 * Post a WR and block until its completion is reaped for the SQ.
2150 static void __ib_drain_sq(struct ib_qp
*qp
)
2152 struct ib_cq
*cq
= qp
->send_cq
;
2153 struct ib_qp_attr attr
= { .qp_state
= IB_QPS_ERR
};
2154 struct ib_drain_cqe sdrain
;
2155 struct ib_send_wr swr
= {}, *bad_swr
;
2158 swr
.wr_cqe
= &sdrain
.cqe
;
2159 sdrain
.cqe
.done
= ib_drain_qp_done
;
2160 init_completion(&sdrain
.done
);
2162 ret
= ib_modify_qp(qp
, &attr
, IB_QP_STATE
);
2164 WARN_ONCE(ret
, "failed to drain send queue: %d\n", ret
);
2168 ret
= ib_post_send(qp
, &swr
, &bad_swr
);
2170 WARN_ONCE(ret
, "failed to drain send queue: %d\n", ret
);
2174 if (cq
->poll_ctx
== IB_POLL_DIRECT
)
2175 while (wait_for_completion_timeout(&sdrain
.done
, HZ
/ 10) <= 0)
2176 ib_process_cq_direct(cq
, -1);
2178 wait_for_completion(&sdrain
.done
);
2182 * Post a WR and block until its completion is reaped for the RQ.
2184 static void __ib_drain_rq(struct ib_qp
*qp
)
2186 struct ib_cq
*cq
= qp
->recv_cq
;
2187 struct ib_qp_attr attr
= { .qp_state
= IB_QPS_ERR
};
2188 struct ib_drain_cqe rdrain
;
2189 struct ib_recv_wr rwr
= {}, *bad_rwr
;
2192 rwr
.wr_cqe
= &rdrain
.cqe
;
2193 rdrain
.cqe
.done
= ib_drain_qp_done
;
2194 init_completion(&rdrain
.done
);
2196 ret
= ib_modify_qp(qp
, &attr
, IB_QP_STATE
);
2198 WARN_ONCE(ret
, "failed to drain recv queue: %d\n", ret
);
2202 ret
= ib_post_recv(qp
, &rwr
, &bad_rwr
);
2204 WARN_ONCE(ret
, "failed to drain recv queue: %d\n", ret
);
2208 if (cq
->poll_ctx
== IB_POLL_DIRECT
)
2209 while (wait_for_completion_timeout(&rdrain
.done
, HZ
/ 10) <= 0)
2210 ib_process_cq_direct(cq
, -1);
2212 wait_for_completion(&rdrain
.done
);
2216 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2218 * @qp: queue pair to drain
2220 * If the device has a provider-specific drain function, then
2221 * call that. Otherwise call the generic drain function
2226 * ensure there is room in the CQ and SQ for the drain work request and
2229 * allocate the CQ using ib_alloc_cq().
2231 * ensure that there are no other contexts that are posting WRs concurrently.
2232 * Otherwise the drain is not guaranteed.
2234 void ib_drain_sq(struct ib_qp
*qp
)
2236 if (qp
->device
->drain_sq
)
2237 qp
->device
->drain_sq(qp
);
2241 EXPORT_SYMBOL(ib_drain_sq
);
2244 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2246 * @qp: queue pair to drain
2248 * If the device has a provider-specific drain function, then
2249 * call that. Otherwise call the generic drain function
2254 * ensure there is room in the CQ and RQ for the drain work request and
2257 * allocate the CQ using ib_alloc_cq().
2259 * ensure that there are no other contexts that are posting WRs concurrently.
2260 * Otherwise the drain is not guaranteed.
2262 void ib_drain_rq(struct ib_qp
*qp
)
2264 if (qp
->device
->drain_rq
)
2265 qp
->device
->drain_rq(qp
);
2269 EXPORT_SYMBOL(ib_drain_rq
);
2272 * ib_drain_qp() - Block until all CQEs have been consumed by the
2273 * application on both the RQ and SQ.
2274 * @qp: queue pair to drain
2278 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2281 * allocate the CQs using ib_alloc_cq().
2283 * ensure that there are no other contexts that are posting WRs concurrently.
2284 * Otherwise the drain is not guaranteed.
2286 void ib_drain_qp(struct ib_qp
*qp
)
2292 EXPORT_SYMBOL(ib_drain_qp
);
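/*
 * Illustrative sketch (not part of the original file): a ULP tearing down a
 * connection typically drains the QP before freeing resources, so that no
 * completion can reference already-freed context:
 *
 *	ib_drain_qp(qp);	// flushes both SQ and RQ and waits for the CQEs
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 *
 * As described above, this requires the CQs to have been allocated with
 * ib_alloc_cq() unless the device provides its own drain implementation.
 */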