/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};
const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};
const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_device_attr devattr;
	int rc;

	rc = ib_query_device(device, &devattr);
	if (rc)
		return ERR_PTR(rc);

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->local_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else {
		struct ib_mr *mr;

		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return (struct ib_pd *)mr;
		}

		pd->local_mr = mr;
		pd->local_dma_lkey = pd->local_mr->lkey;
	}
	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->local_mr) {
		ret = ib_dereg_mr(pd->local_mr);
		WARN_ON(ret);
		pd->local_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq	  = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]      = (IB_QP_SMAC),
				[IB_QPT_UC]      = (IB_QP_SMAC),
				[IB_QPT_XRC_INI] = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC	|
						IB_QP_VID		|
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int           ret = 0;
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
							 qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);
int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);