/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB		= 0,
	IB_GID_TYPE_ROCE	= 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	enum ib_gid_type	gid_type;
	struct net_device	*ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
	/* Not in use, former INIT_TYPE		= (1 << 9), */
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey field in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* Reserved, old SEND_W_INV		= (1 << 16), */
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with a single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL			= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING		= (1ULL << 36),
};
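
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * per the IB_DEVICE_LOCAL_DMA_LKEY comment above, a ULP builds a local
 * SGE from the PD's always-usable lkey rather than testing the capability
 * bit.  "pd", "buf" and "len" are caller-supplied placeholders.
 *
 *	struct ib_sge sge = {
 *		.addr   = ib_dma_map_single(pd->device, buf, len,
 *					    DMA_TO_DEVICE),
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 */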

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT	= 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching on RC transport */
	IB_TM_CAP_RC		= 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16	max_cq_moderation_count;
	u16	max_cq_moderation_period;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps	cq_caps;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names        = names;
	stats->num_counters = num_counters;
	stats->lifespan     = msecs_to_jiffies(lifespan);

	return stats;
}

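/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a driver's stats-allocation hook typically pairs a static name table
 * with the helper above.  The counter names below are hypothetical.
 *
 *	static const char * const my_hw_counter_names[] = {
 *		"rx_packets",
 *		"tx_packets",
 *	};
 *
 *	return rdma_alloc_hw_stats_struct(my_hw_counter_names,
 *					  ARRAY_SIZE(my_hw_counter_names),
 *					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */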

/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
	bool			grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

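/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a client initializes a handler with the macro above and registers it
 * with ib_register_event_handler().  "my_handler" is a hypothetical
 * callback; element.port_num is only valid for port-related events.
 *
 *	static void my_handler(struct ib_event_handler *handler,
 *			       struct ib_event *event)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_handler);
 *	ib_register_event_handler(&handler);
 */
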
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            any arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

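/*
 * Usage sketch (editor's illustration, not part of the original header):
 * an ib_sig_attrs configuration for a T10-DIF handover where the wire
 * side carries protection information and local memory does not.  The
 * 512-byte interval and the "lba" seed are illustrative values only.
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.mem  = { .sig_type = IB_SIG_TYPE_NONE },
 *		.wire = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif  = {
 *				.bg_type     = IB_T10DIF_CRC,
 *				.pi_interval = 512,
 *				.ref_tag     = lba & 0xffffffff,
 *				.ref_remap   = true,
 *			},
 *		},
 *	};
 */
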
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

struct roce_ah_attr {
	u8			dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u8			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr	ib;
		struct roce_ah_attr	roce;
		struct opa_ah_attr	opa;
	};
};

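/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the type selector picks which union member of rdma_ah_attr is valid;
 * an IB address handle can then be created from it.  "pd" and "dlid" are
 * caller-supplied placeholders.
 *
 *	struct rdma_ah_attr attr = {
 *		.type	  = RDMA_AH_ATTR_TYPE_IB,
 *		.sl	  = 0,
 *		.port_num = 1,
 *		.ib.dlid  = dlid,
 *	};
 *	struct ib_ah *ah = rdma_create_ah(pd, &attr);
 */
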
enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH			= 1,
	IB_WC_WITH_IMM			= (1<<1),
	IB_WC_WITH_INVALIDATE		= (1<<2),
	IB_WC_IP_CSUM_OK		= (1<<3),
	IB_WC_WITH_SMAC			= (1<<4),
	IB_WC_WITH_VLAN			= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

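/*
 * Usage sketch (editor's illustration, not part of the original header):
 * per the IB_WC_RECV comment above, a consumer distinguishes receive
 * completions without enumerating every opcode:
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv(wc);
 *	else
 *		handle_send(wc);
 *
 * where handle_recv()/handle_send() are hypothetical consumer helpers.
 */
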
struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32		max_num_tags;
			} tag_matching;
		};
	} ext;
};

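/*
 * Usage sketch (editor's illustration, not part of the original header):
 * creating a basic SRQ on a PD; XRC and TM SRQs additionally fill the
 * "ext" members checked by ib_srq_has_cq() above.  "pd" is a
 * caller-supplied placeholder.
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr	  = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */
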
struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* FREE					= 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

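/*
 * Usage sketch (editor's illustration, not part of the original header):
 * with the CQ API, a consumer embeds struct ib_cqe in its own request
 * context, points wr_cqe at it, and recovers the context in the done
 * callback via container_of().  "my_request" is a hypothetical type.
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *		// ... consumer state ...
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *		// ... complete req ...
 *	}
 *
 *	req->cqe.done = my_done;
 *	wr.wr_cqe     = &req->cqe;
 */
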
struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u8			port_num;   /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

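/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the extended WR structs above embed struct ib_send_wr, so a consumer
 * builds e.g. an RDMA WRITE and posts &wr.wr.  "qp", "sge", "remote_addr"
 * and "rkey" are caller-supplied placeholders.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey	     = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */
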
struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr	wr;
	struct ib_sig_attrs    *sig_attrs;
	struct ib_mr	       *sig_mr;
	int			access_flags;
	struct ib_sge	       *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND	= (1<<6),
	IB_ACCESS_HUGETLB	= (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/* Userspace requested uobject deletion. Call could fail */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* Context is being cleaned-up, but commit was just completed */
	RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;
	int			closing;

	/* locking the uobjects_list */
	struct mutex		uobjects_lock;
	struct list_head	uobjects;
	/* protects cleanup process from other actions */
	struct rw_semaphore	cleanup_rwsem;
	enum rdma_remove_reason cleanup_reason;

	struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root_cached   umem_tree;
	/*
	 * Protects .umem_rbroot and tree, as well as odp_mrs_count and
	 * mmu notifiers registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head	no_private_counters;
	int			odp_mrs_count;
#endif

	struct ib_rdmacg_object	cg_obj;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
	struct ib_uobject	uobj;
	/* ufile contains the lock between context release and file close */
	struct ib_uverbs_file	*ufile;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,		/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	/* poll from softirq context */
	IB_POLL_WORKQUEUE,	/* poll from workqueue */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

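/*
 * Usage sketch (editor's illustration, not part of the original header):
 * kernel consumers typically obtain a CQ through ib_alloc_cq(), picking
 * one of the ib_poll_context modes above; completions are then dispatched
 * through the wr_cqe/done mechanism.  "device" is a caller-supplied
 * placeholder.
 *
 *	struct ib_cq *cq = ib_alloc_cq(device, NULL, 128, 0,
 *				       IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */
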
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	void		       *wq_context;
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32			wq_num;
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type		wq_type;
	u32			max_wr;
	u32			max_sge;
	struct ib_cq	       *cq;
	void		      (*event_handler)(struct ib_event *, void *);
	u32			create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32			ind_tbl_num;
	u32			log_ind_tbl_size;
	struct ib_wq	       **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32			log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq	       **ind_tbl;
};

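/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a receive work queue (e.g. for RSS) is created from a PD and CQ and can
 * then be grouped into an indirection table.  "pd" and "cq" are
 * caller-supplied placeholders.
 *
 *	struct ib_wq_init_attr wq_attr = {
 *		.wq_type = IB_WQT_RQ,
 *		.max_wr	 = 256,
 *		.max_sge = 1,
 *		.cq	 = cq,
 *	};
 *	struct ib_wq *wq = ib_create_wq(pd, &wq_attr);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */
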
enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head	shared_qp_list;
	void		       *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u8			port;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry     res;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	u32		   lkey;
	u32		   rkey;
	u64		   iova;
	u64		   length;
	unsigned int	   page_size;
	bool		   need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	/* L4 headers */
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
};

319a441d
HHZ
1844struct ib_flow_eth_filter {
1845 u8 dst_mac[6];
1846 u8 src_mac[6];
1847 __be16 ether_type;
1848 __be16 vlan_tag;
15dfbd6b
MG
1849 /* Must be last */
1850 u8 real_sz[0];
319a441d
HHZ
1851};
1852
1853struct ib_flow_spec_eth {
fbf46860 1854 u32 type;
319a441d
HHZ
1855 u16 size;
1856 struct ib_flow_eth_filter val;
1857 struct ib_flow_eth_filter mask;
1858};
1859
240ae00e
MB
1860struct ib_flow_ib_filter {
1861 __be16 dlid;
1862 __u8 sl;
15dfbd6b
MG
1863 /* Must be last */
1864 u8 real_sz[0];
240ae00e
MB
1865};
1866
1867struct ib_flow_spec_ib {
fbf46860 1868 u32 type;
240ae00e
MB
1869 u16 size;
1870 struct ib_flow_ib_filter val;
1871 struct ib_flow_ib_filter mask;
1872};
1873
989a3a8f
MG
1874/* IPv4 header flags */
1875enum ib_ipv4_flags {
1876 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1877 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the
1878 last have this flag set */
1879};
1880
319a441d
HHZ
1881struct ib_flow_ipv4_filter {
1882 __be32 src_ip;
1883 __be32 dst_ip;
989a3a8f
MG
1884 u8 proto;
1885 u8 tos;
1886 u8 ttl;
1887 u8 flags;
15dfbd6b
MG
1888 /* Must be last */
1889 u8 real_sz[0];
319a441d
HHZ
1890};
1891
1892struct ib_flow_spec_ipv4 {
fbf46860 1893 u32 type;
319a441d
HHZ
1894 u16 size;
1895 struct ib_flow_ipv4_filter val;
1896 struct ib_flow_ipv4_filter mask;
1897};
1898
4c2aae71
MG
1899struct ib_flow_ipv6_filter {
1900 u8 src_ip[16];
1901 u8 dst_ip[16];
a72c6a2b
MG
1902 __be32 flow_label;
1903 u8 next_hdr;
1904 u8 traffic_class;
1905 u8 hop_limit;
15dfbd6b
MG
1906 /* Must be last */
1907 u8 real_sz[0];
4c2aae71
MG
1908};
1909
1910struct ib_flow_spec_ipv6 {
fbf46860 1911 u32 type;
4c2aae71
MG
1912 u16 size;
1913 struct ib_flow_ipv6_filter val;
1914 struct ib_flow_ipv6_filter mask;
1915};
1916
319a441d
HHZ
1917struct ib_flow_tcp_udp_filter {
1918 __be16 dst_port;
1919 __be16 src_port;
15dfbd6b
MG
1920 /* Must be last */
1921 u8 real_sz[0];
319a441d
HHZ
1922};
1923
1924struct ib_flow_spec_tcp_udp {
fbf46860 1925 u32 type;
319a441d
HHZ
1926 u16 size;
1927 struct ib_flow_tcp_udp_filter val;
1928 struct ib_flow_tcp_udp_filter mask;
1929};
1930
0dbf3332
MR
1931struct ib_flow_tunnel_filter {
1932 __be32 tunnel_id;
1933 u8 real_sz[0];
1934};
1935
1936/* ib_flow_spec_tunnel describes the Vxlan tunnel
1937 * the tunnel_id from val has the vni value
1938 */
1939struct ib_flow_spec_tunnel {
fbf46860 1940 u32 type;
0dbf3332
MR
1941 u16 size;
1942 struct ib_flow_tunnel_filter val;
1943 struct ib_flow_tunnel_filter mask;
1944};
1945
460d0198
MR
1946struct ib_flow_spec_action_tag {
1947 enum ib_flow_spec_type type;
1948 u16 size;
1949 u32 tag_id;
1950};
1951
483a3966
SS
1952struct ib_flow_spec_action_drop {
1953 enum ib_flow_spec_type type;
1954 u16 size;
1955};
1956
319a441d
HHZ
1957union ib_flow_spec {
1958 struct {
fbf46860 1959 u32 type;
319a441d
HHZ
1960 u16 size;
1961 };
1962 struct ib_flow_spec_eth eth;
240ae00e 1963 struct ib_flow_spec_ib ib;
319a441d
HHZ
1964 struct ib_flow_spec_ipv4 ipv4;
1965 struct ib_flow_spec_tcp_udp tcp_udp;
4c2aae71 1966 struct ib_flow_spec_ipv6 ipv6;
0dbf3332 1967 struct ib_flow_spec_tunnel tunnel;
460d0198 1968 struct ib_flow_spec_action_tag flow_tag;
483a3966 1969 struct ib_flow_spec_action_drop drop;
319a441d
HHZ
1970};
1971
1972struct ib_flow_attr {
1973 enum ib_flow_attr_type type;
1974 u16 size;
1975 u16 priority;
1976 u32 flags;
1977 u8 num_of_specs;
1978 u8 port;
1979 /* Following are the optional layers according to user request
1980 * struct ib_flow_spec_xxx
1981 * struct ib_flow_spec_yyy
1982 */
1983};
1984
1985struct ib_flow {
1986 struct ib_qp *qp;
1987 struct ib_uobject *uobject;
1988};
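
/*
 * Usage sketch (editorial example, not part of the original header): a
 * kernel consumer steers traffic for one destination MAC to a QP by
 * passing a buffer that holds struct ib_flow_attr immediately followed
 * by its specs.  ib_create_flow() is declared further down in this
 * header; it is forward-declared here only so the sketch is
 * self-contained.  The port number and mask values are illustrative.
 */
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);

static inline struct ib_flow *example_create_eth_flow(struct ib_qp *qp,
						      const u8 *dst_mac)
{
	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth	eth;
	} flow = {
		.attr = {
			.type	      = IB_FLOW_ATTR_NORMAL,
			.size	      = sizeof(flow),
			.num_of_specs = 1,
			.port	      = 1,
		},
		.eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(struct ib_flow_spec_eth),
		},
	};

	memcpy(flow.eth.val.dst_mac, dst_mac, ETH_ALEN);
	memset(flow.eth.mask.dst_mac, 0xff, ETH_ALEN);	/* exact match */

	return ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
}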

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64			subnet_prefix;
	struct ib_pkey_cache   *pkey;
	struct ib_gid_table    *gid;
	u8			lmc;
	enum ib_port_state	port_state;
};

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache   *ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void		  *clnt_priv;
	struct ib_device  *hca;
	u8		   port_num;

	/* cleanup function must be specified */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t		      list_lock;
	struct list_head	      pkey_list;
};

struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore */
	struct list_head              client_data_list;

	struct ib_cache               cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct ib_port_pkey_list     *port_pkey_list;

	struct iw_cm_verbs	     *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
						     u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int		           (*get_hw_stats)(struct ib_device *device,
						   struct rdma_hw_stats *stats,
						   u8 port, int index);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr,
						   struct ib_udata *udata);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist.  The vendor driver should call dev_hold
	 * on this net device.  The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	struct net_device	  *(*get_netdev)(struct ib_device *device,
						 u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should
	 * add the gid of device @device at gid index @index of
	 * port @port_num to be @gid.  Meta-info of that gid (for example,
	 * the network device related to this gid) is available
	 * at @attr.  @context allows the HW vendor driver to store extra
	 * information together with a GID entry.  The HW vendor may allocate
	 * memory to contain this information and store it in @context when a
	 * new GID entry is written to.  Params are consistent until the next
	 * call of add_gid or delete_gid.  The function should return 0 on
	 * success or error otherwise.  The function could be called
	 * concurrently for different ports.  This function is only called
	 * when roce_gid_table is used.
	 */
	int		           (*add_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      const union ib_gid *gid,
					      const struct ib_gid_attr *attr,
					      void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory.  The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int		           (*del_gid)(struct ib_device *device,
					      u8 port_num,
					      unsigned int index,
					      void **context);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct rdma_ah_attr *ah_attr,
						struct ib_udata *udata);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct rdma_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct rdma_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						const struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
					       enum ib_mr_type mr_type,
					       u32 max_num_sg);
	int                        (*map_mr_sg)(struct ib_mr *mr,
						struct scatterlist *sg,
						int sg_nents,
						unsigned int *sg_offset);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type,
					       struct ib_udata *udata);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad_hdr *in_mad,
						  size_t in_mad_size,
						  struct ib_mad_hdr *out_mad,
						  size_t *out_mad_size,
						  u16 *out_mad_pkey_index);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);
	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void			   (*drain_rq)(struct ib_qp *qp);
	void			   (*drain_sq)(struct ib_qp *qp);
	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
							int state);
	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
						    struct ifla_vf_info *ivf);
	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_stats *stats);
	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
						  int type);
	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
						struct ib_wq_init_attr *init_attr,
						struct ib_udata *udata);
	int			   (*destroy_wq)(struct ib_wq *wq);
	int			   (*modify_wq)(struct ib_wq *wq,
						struct ib_wq_attr *attr,
						u32 wq_attr_mask,
						struct ib_udata *udata);
	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
							   struct ib_rwq_ind_table_init_attr *init_attr,
							   struct ib_udata *udata);
	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
	 * doesn't support the specified rdma netdev type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	struct module               *owner;
	struct device                dev;
	struct kobject               *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	u8                           node_type;
	u8                           phys_port_cnt;
	struct ib_device_attr        attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats         *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;
	/*
	 * Implementation details of the RDMA core, don't use in drivers
	 */
	struct rdma_restrack_root    res;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	struct uverbs_root_spec	     *specs_root;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev use for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	const void __user *p = udata->inbuf + offset;
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting is_switch bit on
 * in ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}
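
/*
 * Usage sketch (editorial example, not part of the original header): the
 * canonical loop over every valid port of a device.  The helpers above
 * make it work both for switches (which expose port 0) and for HCAs
 * (ports 1..phys_port_cnt).
 */
static inline void example_for_each_port(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device);
	     port++) {
		/* rdma_is_port_valid(device, port) holds here */
	}
}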

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments.  Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
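
/*
 * Usage sketch (editorial example, not part of the original header): the
 * typical lifecycle of a protection domain in a ULP.  Like most verbs
 * allocators, ib_alloc_pd() reports failure through ERR_PTR().
 */
static inline int example_pd_lifecycle(struct ib_device *device)
{
	struct ib_pd *pd = ib_alloc_pd(device, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create CQs, QPs and MRs under this PD ... */

	ib_dealloc_pd(pd);
	return 0;
}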

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns 0 on success and returns appropriate error code on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);

/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 *   work completion.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}
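
/*
 * Usage sketch (editorial example, not part of the original header):
 * posting one receive buffer to an SRQ.  @dma_addr is assumed to be a
 * DMA-mapped buffer covered by @lkey; @wr_id is an opaque cookie that
 * comes back in the matching work completion.
 */
static inline int example_post_one_srq_recv(struct ib_srq *srq, u64 wr_id,
					    u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	}, *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}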

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
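
/*
 * Usage sketch (editorial example, not part of the original header):
 * creating an RC QP with one CQ shared between send and receive.  The
 * queue depths are illustrative; on success the device writes the
 * actually granted capabilities back into qp_init_attr.cap.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_qp_init_attr qp_init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &qp_init_attr);
}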

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
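
/*
 * Usage sketch (editorial example, not part of the original header):
 * posting a single signaled SEND.  With a longer list, @bad_wr would
 * point at the first work request that could not be queued.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 wr_id,
					u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	}, *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}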

struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller);
#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
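
/*
 * Usage sketch (editorial example, not part of the original header):
 * allocating a CQ through the modern ib_alloc_cq() interface.  With
 * IB_POLL_SOFTIRQ the core polls the CQ from irq-poll context and
 * invokes each completion's wr_cqe->done() callback, so the consumer
 * never calls ib_poll_cq() directly.  The CQ depth is illustrative.
 */
static inline struct ib_cq *example_alloc_cq(struct ib_device *dev)
{
	return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
}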

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes with which the CQ should be created.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
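
/*
 * Usage sketch (editorial example, not part of the original header): the
 * drain-then-arm loop implied by the return-value contract above.  The
 * CQ is emptied, re-armed, and polled again whenever ib_req_notify_cq()
 * reports that a completion may have raced with the re-arm.
 */
static inline void example_drain_and_arm_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0) {
			/* ... process wc.status / wc.wr_id ... */
		}
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}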

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}
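
/*
 * Usage sketch (editorial example, not part of the original header):
 * mapping a kernel buffer for a device read, with the mandatory error
 * check before the address is handed to the hardware.
 */
static inline int example_dma_single(struct ib_device *dev, void *buf,
				     size_t len)
{
	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post a WR referencing dma_addr and wait for its completion ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}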
3318
9b513090
RC
3319/**
3320 * ib_dma_map_page - Map a physical page to DMA address
3321 * @dev: The device for which the dma_addr is to be created
3322 * @page: The page to be mapped
3323 * @offset: The offset within the page
3324 * @size: The size of the region in bytes
3325 * @direction: The direction of the DMA
3326 */
3327static inline u64 ib_dma_map_page(struct ib_device *dev,
3328 struct page *page,
3329 unsigned long offset,
3330 size_t size,
3331 enum dma_data_direction direction)
3332{
0957c29f 3333 return dma_map_page(dev->dma_device, page, offset, size, direction);
9b513090
RC
3334}
3335
3336/**
3337 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3338 * @dev: The device for which the DMA address was created
3339 * @addr: The DMA address
3340 * @size: The size of the region in bytes
3341 * @direction: The direction of the DMA
3342 */
3343static inline void ib_dma_unmap_page(struct ib_device *dev,
3344 u64 addr, size_t size,
3345 enum dma_data_direction direction)
3346{
0957c29f 3347 dma_unmap_page(dev->dma_device, addr, size, direction);
9b513090
RC
3348}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}
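
/*
 * Sketch ('dev', 'sgl' and 'nents' assumed from context). Note that
 * ib_dma_map_sg() returns the number of mapped entries (0 on failure),
 * which may be smaller than 'nents', while the unmap call takes the
 * original 'nents':
 *
 *	int n;
 *
 *	n = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */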

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
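
/*
 * The _attrs variants behave like ib_dma_map_sg()/ib_dma_unmap_sg() but
 * also take DMA attributes from <linux/dma-mapping.h>, e.g. (sketch,
 * assuming DMA_ATTR_NO_WARN suits the caller):
 *
 *	n = ib_dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
 *				DMA_ATTR_NO_WARN);
 */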

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
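
/*
 * Since both helpers are thin wrappers, new code can call sg_dma_address()
 * and sg_dma_len() directly when walking a mapped list, e.g. to build SGEs
 * (sketch; 'sgl', 'n_mapped', 'sge' and 'pd' assumed from context):
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, n_mapped, i) {
 *		sge[i].addr   = sg_dma_address(sg);
 *		sge[i].length = sg_dma_len(sg);
 *		sge[i].lkey   = pd->local_dma_lkey;
 *	}
 */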

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
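
/*
 * Typical bracketing of CPU access to a streaming mapping (sketch;
 * 'dev', 'dma_addr' and 'len' assumed from context):
 *
 *	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 */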

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
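
/*
 * Sketch of a coherent allocation lifetime ('dev' and 'size' assumed
 * from context):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */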

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);
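
/*
 * Allocation/teardown sketch ('pd' is assumed; the MR type and the SG
 * limit are illustrative):
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);
 */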

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
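
/*
 * Sketch: cycle the low key byte of an MR before the next fast
 * registration ('mr' assumed from context):
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */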

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
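
/*
 * FMR lifecycle sketch ('pd', 'attr', 'pages', 'npages' and 'iova' are
 * assumed from the caller's context):
 *
 *	LIST_HEAD(fmr_list);
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	if (ib_map_phys_fmr(fmr, pages, npages, iova))
 *		goto err;
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */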

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
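
/*
 * Sketch ('qp' is a UD QP; 'mgid' and 'mlid' come from the multicast
 * join, e.g. via the SA):
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */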

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @caller: Module name for kernel consumers
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
	__ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
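
/*
 * Kernel consumers use the macro so the XRC domain is tagged with their
 * module name (sketch; 'device' assumed from context):
 *
 *	struct ib_xrcd *xrcd = ib_alloc_xrcd(device);
 *
 *	if (IS_ERR(xrcd))
 *		return PTR_ERR(xrcd);
 *	...
 *	ib_dealloc_xrcd(xrcd);
 */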

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}
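
/*
 * Sketch: validate caller-supplied access flags before registering an MR.
 * IB_ACCESS_REMOTE_WRITE without IB_ACCESS_LOCAL_WRITE would fail here:
 *
 *	ret = ib_check_mr_access(mr_access_flags);
 *	if (ret)
 *		return ret;
 */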

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr. Its first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks are indicated in the status bitmask
 *   and the relevant info is placed in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);
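
/*
 * Sketch of a signature status check ('mr' assumed from context; the
 * error handling is illustrative):
 *
 *	struct ib_mr_status mr_status;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		... inspect mr_status.sig_err ...
 */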

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
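
/*
 * Sketch: map a DMA-mapped scatterlist into an MR's page list before
 * posting a registration work request ('mr', 'sgl' and 'nents' assumed
 * from context; a NULL sg_offset is treated as zero):
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 */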

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
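
/*
 * Sketch: quiesce a QP before tearing it down, so no completions arrive
 * after the CQs are gone ('qp' assumed from context):
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */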

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the GRH */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
}

/* Get the AH type */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u32 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	else if (rdma_protocol_ib(dev, port_num) &&
		 rdma_cap_opa_ah(dev, port_num))
		return RDMA_AH_ATTR_TYPE_OPA;
	else
		return RDMA_AH_ATTR_TYPE_IB;
}
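
/*
 * Sketch: fill an address handle attribute for a globally routed
 * destination ('dev', 'port_num', 'dlid', 'dgid' and 'sgid_index' are
 * assumed from the caller's context; the hop limit and traffic class
 * values are illustrative):
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 */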

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *   In the current implementation the only way to get
 *   the 32bit lid is from other sources for OPA.
 *   For IB, lids will always be 16bits so cast the
 *   value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL if the completion vector index is invalid or the device
 * driver doesn't implement get_vector_affinity; otherwise returns the
 * CPU map of the completion vector.
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);
}
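
/*
 * Sketch: pick a completion vector whose affinity includes a given CPU
 * ('device' and 'cpu' assumed from context):
 *
 *	const struct cpumask *mask;
 *	int v;
 *
 *	for (v = 0; v < device->num_comp_vectors; v++) {
 *		mask = ib_get_vector_affinity(device, v);
 *		if (mask && cpumask_test_cpu(cpu, mask))
 *			break;
 *	}
 */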

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

#endif /* IB_VERBS_H */