/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr);
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
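
/*
 * Illustrative sketch (not part of this file's API surface): an IB client's
 * async event handler might use ib_event_msg() to log readable event names.
 * The handler name below is hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event: %s on port %u\n",
 *			ib_event_msg(event->event), event->element.port_num);
 *	}
 */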
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return   1;
	case IB_RATE_5_GBPS:   return   2;
	case IB_RATE_10_GBPS:  return   4;
	case IB_RATE_20_GBPS:  return   8;
	case IB_RATE_30_GBPS:  return  12;
	case IB_RATE_40_GBPS:  return  16;
	case IB_RATE_60_GBPS:  return  24;
	case IB_RATE_80_GBPS:  return  32;
	case IB_RATE_120_GBPS: return  48;
	case IB_RATE_14_GBPS:  return   6;
	case IB_RATE_56_GBPS:  return  22;
	case IB_RATE_112_GBPS: return  45;
	case IB_RATE_168_GBPS: return  67;
	case IB_RATE_25_GBPS:  return  10;
	case IB_RATE_100_GBPS: return  40;
	case IB_RATE_200_GBPS: return  80;
	case IB_RATE_300_GBPS: return 120;
	case IB_RATE_28_GBPS:  return  11;
	case IB_RATE_50_GBPS:  return  20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default:	       return  -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
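
/*
 * Illustrative sketch: the two helpers above are inverses over the defined
 * rates, and ib_rate_to_mbps() gives the signalled rate in Mbit/sec:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	// 40
 *	enum ib_rate r = mult_to_ib_rate(mult);		// IB_RATE_100_GBPS
 *	int mbps = ib_rate_to_mbps(r);			// 103125
 */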
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	if (node_type == RDMA_NODE_USNIC)
		return RDMA_TRANSPORT_USNIC;
	if (node_type == RDMA_NODE_USNIC_UDP)
		return RDMA_TRANSPORT_USNIC_UDP;
	if (node_type == RDMA_NODE_RNIC)
		return RDMA_TRANSPORT_IWARP;

	return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num)
{
	enum rdma_transport_type lt;

	if (device->ops.get_link_layer)
		return device->ops.get_link_layer(device, port_num);

	lt = rdma_node_get_transport(device->node_type);
	if (lt == RDMA_TRANSPORT_IB)
		return IB_LINK_LAYER_INFINIBAND;

	return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->ops.alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_set_task(&pd->res, caller);
	rdma_restrack_kadd(&pd->res);

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
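
/*
 * Illustrative sketch: kernel ULPs normally call this through the
 * ib_alloc_pd() wrapper macro, which supplies the caller name. "ibdev" is a
 * hypothetical device pointer from the client's add() callback.
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */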
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->ops.dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	rdma_restrack_del(&pd->res);
	/* Making dealloc_pd a void return is a WIP, no driver should return
	 * an error here.
	 */
	ret = pd->device->ops.dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest:       Pointer to destination ah_attr. Contents of the destination
 *              are assumed to be invalid and are overwritten.
 * @src:        Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{
	*dest = *src;
	if (dest->grh.sgid_attr)
		rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);
/**
 * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
 * @old:        Pointer to existing ah_attr which needs to be replaced.
 *              old is assumed to be valid or zero'd
 * @new:        Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * the reference to the replaced ah_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);
/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest:       Pointer to destination ah_attr to copy to.
 *              dest is assumed to be valid or zero'd
 * @src:        Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. This also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
	rdma_destroy_ah_attr(dest);
	*dest = *src;
	src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);
/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if ((rdma_is_grh_required(device, ah_attr->port_num) ||
	     ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		return -EINVAL;

	if (ah_attr->grh.sgid_attr) {
		/*
		 * Make sure the passed sgid_attr is consistent with the
		 * parameters
		 */
		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
			return -EINVAL;
	}
	return 0;
}
/*
 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{
	const struct ib_gid_attr *sgid_attr;
	struct ib_global_route *grh;
	int ret;

	*old_sgid_attr = ah_attr->grh.sgid_attr;

	ret = rdma_check_ah_attr(device, ah_attr);
	if (ret)
		return ret;

	if (!(ah_attr->ah_flags & IB_AH_GRH))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);
	if (grh->sgid_attr)
		return 0;

	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
	grh->sgid_attr = sgid_attr;
	return 0;
}
static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{
	/*
	 * Fill didn't change anything, the caller retains ownership of
	 * whatever it passed
	 */
	if (ah_attr->grh.sgid_attr == old_sgid_attr)
		return;

	/*
	 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
	 * doesn't see any change in the rdma_ah_attr. If we get here
	 * old_sgid_attr is NULL.
	 */
	rdma_destroy_ah_attr(ah_attr);
}
static const struct ib_gid_attr *
rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
		      const struct ib_gid_attr *old_attr)
{
	if (old_attr)
		rdma_put_gid_attr(old_attr);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
		return ah_attr->grh.sgid_attr;
	}
	return NULL;
}
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     u32 flags,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);

	if (!pd->device->ops.create_ah)
		return ERR_PTR(-EOPNOTSUPP);

	ah = pd->device->ops.create_ah(pd, ah_attr, flags, udata);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		ah->type    = ah_attr->type;
		ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);

		atomic_inc(&pd->usecnt);
	}

	return ah;
}
/**
 * rdma_create_ah - Creates an address handle for the
 * given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * It returns a valid address handle on success and returns an ERR_PTR on
 * error. The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int ret;

	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = _rdma_create_ah(pd, ah_attr, flags, NULL);

	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
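
/*
 * Illustrative sketch: a kernel caller fills an rdma_ah_attr (here only the
 * IB-transport fields; "remote_lid" and "port_num" are hypothetical) and
 * creates an AH from it:
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&attr, remote_lid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 */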
/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns a valid address handle on success and returns an ERR_PTR on
 * error. The address handle is used to reference a local or global
 * destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int err;

	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);

out:
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}
struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx = context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static const struct ib_gid_attr *
get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
		       u16 vlan_id, const union ib_gid *sgid,
		       enum ib_gid_type gid_type)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				       &context);
}
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in  src_in;
	struct sockaddr_in  dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attribute must have valid port_num, sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int hop_limit = 0xff;
	int ret = 0;

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return ret;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);

	grh->hop_limit = hop_limit;
	return ret;
}
/*
 * This function initializes address handle attributes from the incoming packet.
 * Incoming packet has dgid of the receiver node on which this code is
 * getting executed and sgid contains the GID of the sender.
 *
 * When resolving mac address of destination, the arrived dgid is used
 * as sgid and sgid is used as dgid because sgid contains destination's
 * GID whom to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	const struct ib_gid_attr *sgid_attr;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (rdma_protocol_roce(device, port_num)) {
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		sgid_attr = get_sgid_attr_from_eth(device, port_num,
						   vlan_id, &dgid,
						   gid_type);
		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
		if (ret)
			rdma_destroy_ah_attr(ah_attr);

		return ret;
	} else {
		rdma_ah_set_dlid(ah_attr, wc->slid);
		rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);

		if ((wc->wc_flags & IB_WC_GRH) == 0)
			return 0;

		if (dgid.global.interface_id !=
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
			sgid_attr = rdma_find_gid_by_port(
				device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
		} else
			sgid_attr = rdma_get_gid_attr(device, port_num, 0);

		if (IS_ERR(sgid_attr))
			return PTR_ERR(sgid_attr);
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_move_grh_sgid_attr(ah_attr,
					&sgid,
					flow_class & 0xFFFFF,
					hoplimit,
					(flow_class >> 20) & 0xFF,
					sgid_attr);

		return 0;
	}
}
EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr:	Pointer to AH attribute structure
 * @dgid:	Destination GID
 * @flow_label:	Flow label
 * @hop_limit:	Hop limit
 * @traffic_class: traffic class
 * @sgid_attr:	Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 * calling this function.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
			traffic_class);
	attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
	if (ah_attr->grh.sgid_attr) {
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
		ah_attr->grh.sgid_attr = NULL;
	}
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);

	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
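
/*
 * Illustrative sketch: a UD service replying to a received datagram can build
 * the reply AH directly from the completion. "wc" and "recv_buf" are
 * hypothetical; when IB_WC_GRH is set the GRH sits at the start of the
 * receive buffer.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc,
 *						(struct ib_grh *)recv_buf,
 *						port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */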
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *old_sgid_attr;
	int ret;

	if (ah->type != ah_attr->type)
		return -EINVAL;

	ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ret;

	ret = ah->device->ops.modify_ah ?
		ah->device->ops.modify_ah(ah, ah_attr) : -EOPNOTSUPP;

	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ret;
}
EXPORT_SYMBOL(rdma_modify_ah);
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	ah_attr->grh.sgid_attr = NULL;

	return ah->device->ops.query_ah ?
		ah->device->ops.query_ah(ah, ah_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_query_ah);
int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
	struct ib_pd *pd;
	int ret;

	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);

	pd = ah->pd;
	ret = ah->device->ops.destroy_ah(ah, flags);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (sgid_attr)
			rdma_put_gid_attr(sgid_attr);
	}

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (ib_srq_has_cq(srq->srq_type)) {
			srq->ext.cq = srq_init_attr->ext.cq;
			atomic_inc(&srq->ext.cq->usecnt);
		}
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
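
/*
 * Illustrative sketch of a basic (non-XRC) SRQ creation; the queue sizes are
 * hypothetical:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */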
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->ops.modify_srq ?
		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
					    NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_modify_srq);
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->ops.query_srq ?
		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;

	ret = srq->device->ops.destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->ops.destroy_qp(real_qp);
	return qp;
}
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
	if (IS_ERR(qp))
		return qp;

	ret = ib_create_qp_security(qp, device);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}

	qp->real_qp    = qp;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
	qp->port = 0;

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
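
/*
 * Illustrative sketch of a kernel RC QP creation; the CQ pointers and sizes
 * are hypothetical:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */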
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
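
/*
 * Illustrative sketch: a driver validating a requested transition checks the
 * attribute mask against qp_state_table before touching hardware:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				attr_mask))
 *		return -EINVAL;
 */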
/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device:		Device to consider
 * @ah_attr:		address handle attribute which describes the
 *			source and destination parameters
 * ib_resolve_eth_dmac() resolves destination mac address and L3 hop limit.
 * It returns 0 on success or appropriate error code. It initializes the
 * necessary ah_attr fields when call is successful.
 */
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->roce.dmac);
		}
	} else {
		ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
	}
	return ret;
}
*qp
)
1588 return (qp
->qp_type
== IB_QPT_UC
||
1589 qp
->qp_type
== IB_QPT_RC
||
1590 qp
->qp_type
== IB_QPT_XRC_INI
||
1591 qp
->qp_type
== IB_QPT_XRC_TGT
);
/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	const struct ib_gid_attr *old_sgid_attr_av;
	const struct ib_gid_attr *old_sgid_attr_alt_av;
	int ret;

	if (attr_mask & IB_QP_AV) {
		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
					  &old_sgid_attr_av);
		if (ret)
			return ret;
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		/*
		 * FIXME: This does not track the migration state, so if the
		 * user loads a new alternate path after the HW has migrated
		 * from primary->alternate we will keep the wrong
		 * references. This is OK for IB because the reference
		 * counting does not serve any functional purpose.
		 */
		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
					  &old_sgid_attr_alt_av);
		if (ret)
			goto out_av;

		/*
		 * Today the core code can only handle alternate paths and APM
		 * for IB. Ban them in roce mode.
		 */
		if (!(rdma_protocol_ib(qp->device,
				       attr->alt_ah_attr.port_num) &&
		      rdma_protocol_ib(qp->device, port))) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * If the user provided the qp_attr then we have to resolve it. Kernel
	 * users have to provide already resolved rdma_ah_attr's
	 */
	if (udata && (attr_mask & IB_QP_AV) &&
	    attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
	    is_qp_type_connected(qp)) {
		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
		if (ret)
			goto out;
	}

	if (rdma_ib_or_roce(qp->device, port)) {
		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s rq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->rq_psn &= 0xffffff;
		}

		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s sq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->sq_psn &= 0xffffff;
		}
	}

	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (ret)
		goto out;

	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_AV)
		qp->av_sgid_attr =
			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
			&attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
	if (attr_mask & IB_QP_ALT_PATH)
		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
	if (attr_mask & IB_QP_AV)
		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
	return ret;
}
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
{
	int rc;
	u32 netdev_speed;
	struct net_device *netdev;
	struct ethtool_link_ksettings lksettings;

	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	if (!dev->ops.get_netdev)
		return -EOPNOTSUPP;

	netdev = dev->ops.get_netdev(dev, port_num);
	if (!netdev)
		return -ENODEV;

	rtnl_lock();
	rc = __ethtool_get_link_ksettings(netdev, &lksettings);
	rtnl_unlock();

	dev_put(netdev);

	if (!rc) {
		netdev_speed = lksettings.base.speed;
	} else {
		netdev_speed = SPEED_1000;
		pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
			netdev_speed);
	}

	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
EXPORT_SYMBOL(ib_get_eth_speed);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
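
/*
 * Illustrative sketch: moving a fresh RC QP to INIT requires exactly the
 * req_param bits from the RESET->INIT entry of qp_state_table above; the
 * values here are hypothetical:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */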
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	qp_attr->ah_attr.grh.sgid_attr = NULL;
	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;

	return qp->device->ops.query_qp ?
		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
					 qp_init_attr) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	if (qp->qp_sec)
		ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
*qp
)
1833 const struct ib_gid_attr
*alt_path_sgid_attr
= qp
->alt_path_sgid_attr
;
1834 const struct ib_gid_attr
*av_sgid_attr
= qp
->av_sgid_attr
;
1836 struct ib_cq
*scq
, *rcq
;
1838 struct ib_rwq_ind_table
*ind_tbl
;
1839 struct ib_qp_security
*sec
;
1842 WARN_ON_ONCE(qp
->mrs_used
> 0);
1844 if (atomic_read(&qp
->usecnt
))
1847 if (qp
->real_qp
!= qp
)
1848 return __ib_destroy_shared_qp(qp
);
1854 ind_tbl
= qp
->rwq_ind_tbl
;
1857 ib_destroy_qp_security_begin(sec
);
1860 rdma_rw_cleanup_mrs(qp
);
1862 rdma_restrack_del(&qp
->res
);
1863 ret
= qp
->device
->ops
.destroy_qp(qp
);
1865 if (alt_path_sgid_attr
)
1866 rdma_put_gid_attr(alt_path_sgid_attr
);
1868 rdma_put_gid_attr(av_sgid_attr
);
1870 atomic_dec(&pd
->usecnt
);
1872 atomic_dec(&scq
->usecnt
);
1874 atomic_dec(&rcq
->usecnt
);
1876 atomic_dec(&srq
->usecnt
);
1878 atomic_dec(&ind_tbl
->usecnt
);
1880 ib_destroy_qp_security_end(sec
);
1883 ib_destroy_qp_security_abort(sec
);
1888 EXPORT_SYMBOL(ib_destroy_qp
);
/* Completion queues */

struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller)
{
	struct ib_cq *cq;

	cq = device->ops.create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
		cq->res.type = RDMA_RESTRACK_CQ;
		rdma_restrack_set_task(&cq->res, caller);
		rdma_restrack_kadd(&cq->res);
	}

	return cq;
}
EXPORT_SYMBOL(__ib_create_cq);
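
/*
 * Illustrative sketch: kernel users reach this through the ib_create_cq()
 * wrapper macro, which fills in the caller name. "my_comp_handler" and "ctx"
 * are hypothetical:
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(ibdev, my_comp_handler, NULL,
 *					ctx, &cq_attr);
 */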
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->ops.modify_cq ?
		cq->device->ops.modify_cq(cq, cq_count,
					  cq_period) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	rdma_restrack_del(&cq->res);
	return cq->device->ops.destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->ops.resize_cq ?
		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	struct ib_dm *dm = mr->dm;
	int ret;

	rdma_restrack_del(&mr->res);
	ret = mr->device->ops.dereg_mr(mr);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (dm)
			atomic_dec(&dm->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->ops.alloc_mr)
		return ERR_PTR(-EOPNOTSUPP);

	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->dm      = NULL;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
		mr->res.type = RDMA_RESTRACK_MR;
		rdma_restrack_kadd(&mr->res);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
2001 /* "Fast" memory regions */
2003 struct ib_fmr
*ib_alloc_fmr(struct ib_pd
*pd
,
2004 int mr_access_flags
,
2005 struct ib_fmr_attr
*fmr_attr
)
2009 if (!pd
->device
->ops
.alloc_fmr
)
2010 return ERR_PTR(-EOPNOTSUPP
);
2012 fmr
= pd
->device
->ops
.alloc_fmr(pd
, mr_access_flags
, fmr_attr
);
2014 fmr
->device
= pd
->device
;
2016 atomic_inc(&pd
->usecnt
);
2021 EXPORT_SYMBOL(ib_alloc_fmr
);
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->ops.unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->ops.dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, RoCE annex declares that
	 * multicast LID should be ignored. We can't tell at this step if the
	 * QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.attach_mcast)
		return -EOPNOTSUPP;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.detach_mcast)
		return -EOPNOTSUPP;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
	struct ib_xrcd *xrcd;

	if (!device->ops.alloc_xrcd)
		return ERR_PTR(-EOPNOTSUPP);

	xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(__ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->ops.dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ, and set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->ops.create_wq)
		return ERR_PTR(-EOPNOTSUPP);

	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);
/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->ops.destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);
/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->ops.modify_wq)
		return -EOPNOTSUPP;

	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
/**
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less
 *	than the created ib_rwq_ind_table object and the caller is responsible
 *	for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->ops.create_rwq_ind_table)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->ops.create_rwq_ind_table(device,
							 init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
/**
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	if (!mr->device->ops.check_mr_status)
		return -EOPNOTSUPP;

	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->ops.set_vf_link_state)
		return -EOPNOTSUPP;

	return device->ops.set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->ops.get_vf_config)
		return -EOPNOTSUPP;

	return device->ops.get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->ops.get_vf_stats)
		return -EOPNOTSUPP;

	return device->ops.get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->ops.set_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the total byte length of the sg_nents elements exceeds the MR's
 *   max_num_sg * page_size, then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints apply and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration. An illustrative usage sketch follows the
 * function body below.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);

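/*
 * Illustrative sketch (not part of this file): a typical fast-registration
 * flow DMA-maps a scatterlist, maps its prefix into the MR, and then posts
 * an IB_WR_REG_MR work request. "sgl" and its entry count "nents" are
 * assumed to come from the caller:
 *
 *	int n;
 *
 *	n = ib_dma_map_sg(qp->device, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *
 *	n = ib_map_mr_sg(mr, sgl, n, NULL, PAGE_SIZE);
 *	if (n < 0)
 *		return n;
 *
 * A return value smaller than nents means only a prefix of the list was
 * mapped; the caller must decide whether that is acceptable.
 */
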
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:           memory region
 * @sgl:          dma mapped scatterlist
 * @sg_nents:     number of entries in sg
 * @sg_offset_p:  IN:  start offset in bytes into sg
 *                OUT: offset in bytes for element n of the sg of the first
 *                     byte that has not been processed where n is the return
 *                     value of this function.
 * @set_page:     driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector. An illustrative driver-side sketch follows the
 * function body below.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset_p,
		   int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);

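/*
 * Illustrative sketch (not part of this file): a driver's map_mr_sg hook
 * typically supplies a set_page callback that appends each page address to
 * a driver-private page list. "struct my_mr", "to_my_mr()" and
 * "my_set_page()" are hypothetical names:
 *
 *	static int my_set_page(struct ib_mr *ibmr, u64 addr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		if (mr->npages == mr->max_pages)
 *			return -ENOMEM;
 *		mr->pages[mr->npages++] = addr;
 *		return 0;
 *	}
 *
 * and then implements its map_mr_sg device op as:
 *
 *	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, my_set_page);
 */
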
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;

	/* Move the QP to the error state so posted WRs flush through. */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_post_send(qp, &swr.wr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}

/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_post_recv(qp, &rwr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}

/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_sq)
		qp->device->ops.drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);

/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_rq)
		qp->device->ops.drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);

/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 *
 * An illustrative usage sketch follows the function body below.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);

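/*
 * Illustrative teardown sketch (not part of this file): draining before
 * destruction guarantees that no completion handler runs against freed
 * resources. The CQs are assumed to have been allocated with ib_alloc_cq():
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */
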
struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *))
{
	struct rdma_netdev_alloc_params params;
	struct net_device *netdev;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return ERR_PTR(-EOPNOTSUPP);

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return ERR_PTR(rc);

	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
				  setup, params.txqs, params.rxqs);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	return netdev;
}
EXPORT_SYMBOL(rdma_alloc_netdev);

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev)
{
	struct rdma_netdev_alloc_params params;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return -EOPNOTSUPP;

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return rc;

	return params.initialize_rdma_netdev(device, port_num,
					     netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);