]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - include/rdma/ib_verbs.h
RDMA/cma: Fix use after free race in roce multicast join
[mirror_ubuntu-jammy-kernel.git] / include / rdma / ib_verbs.h
CommitLineData
6bf9d8f6 1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
1da177e4
LT
2/*
3 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
5 * Copyright (c) 2004 Intel Corporation. All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
7 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
2a1d9b7f 8 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
f7c6a7b5 9 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
1da177e4
LT
10 */
11
6bf9d8f6 12#ifndef IB_VERBS_H
1da177e4
LT
13#define IB_VERBS_H
14
15#include <linux/types.h>
16#include <linux/device.h>
9b513090 17#include <linux/dma-mapping.h>
459d6e2a 18#include <linux/kref.h>
bfb3ea12
DB
19#include <linux/list.h>
20#include <linux/rwsem.h>
f0626710 21#include <linux/workqueue.h>
14d3a3b2 22#include <linux/irq_poll.h>
dd5f03be 23#include <uapi/linux/if_ether.h>
c865f246
SK
24#include <net/ipv6.h>
25#include <net/ip.h>
301a721e
MB
26#include <linux/string.h>
27#include <linux/slab.h>
2fc77572 28#include <linux/netdevice.h>
01b67117 29#include <linux/refcount.h>
50174a7f 30#include <linux/if_link.h>
60063497 31#include <linux/atomic.h>
882214e2 32#include <linux/mmu_notifier.h>
7c0f6ba6 33#include <linux/uaccess.h>
43579b5f 34#include <linux/cgroup_rdma.h>
f6316032
LR
35#include <linux/irqflags.h>
36#include <linux/preempt.h>
da662979 37#include <linux/dim.h>
ea6819e1 38#include <uapi/rdma/ib_user_verbs.h>
413d3347 39#include <rdma/rdma_counter.h>
02d8883f 40#include <rdma/restrack.h>
36b1e47f 41#include <rdma/signature.h>
0ede73bc 42#include <uapi/rdma/rdma_user_ioctl.h>
2eb9beae 43#include <uapi/rdma/ib_user_ioctl_verbs.h>
1da177e4 44
9abb0d1b
LR
45#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
46
b5231b01 47struct ib_umem_odp;
620d3f81 48struct ib_uqp_object;
9fbe334c 49struct ib_usrq_object;
e04dd131 50struct ib_uwq_object;
211cd945 51struct rdma_cm_id;
b5231b01 52
f0626710 53extern struct workqueue_struct *ib_wq;
14d3a3b2 54extern struct workqueue_struct *ib_comp_wq;
f794809a 55extern struct workqueue_struct *ib_comp_unbound_wq;
f0626710 56
5bd48c18
JG
57struct ib_ucq_object;
58
923abb9d
GP
59__printf(3, 4) __cold
60void ibdev_printk(const char *level, const struct ib_device *ibdev,
61 const char *format, ...);
62__printf(2, 3) __cold
63void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
64__printf(2, 3) __cold
65void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
66__printf(2, 3) __cold
67void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
68__printf(2, 3) __cold
69void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
70__printf(2, 3) __cold
71void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
72__printf(2, 3) __cold
73void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
74__printf(2, 3) __cold
75void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
76
ceabef7d
OZ
77#if defined(CONFIG_DYNAMIC_DEBUG) || \
78 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
923abb9d
GP
79#define ibdev_dbg(__dev, format, args...) \
80 dynamic_ibdev_dbg(__dev, format, ##args)
923abb9d
GP
81#else
82__printf(2, 3) __cold
83static inline
84void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
85#endif
86
05bb411a
GP
87#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
88do { \
89 static DEFINE_RATELIMIT_STATE(_rs, \
90 DEFAULT_RATELIMIT_INTERVAL, \
91 DEFAULT_RATELIMIT_BURST); \
92 if (__ratelimit(&_rs)) \
93 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
94} while (0)
95
96#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
97 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
98#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
99 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
100#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
101 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
102#define ibdev_err_ratelimited(ibdev, fmt, ...) \
103 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
104#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
105 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
106#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
107 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
108#define ibdev_info_ratelimited(ibdev, fmt, ...) \
109 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
110
ceabef7d
OZ
111#if defined(CONFIG_DYNAMIC_DEBUG) || \
112 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
05bb411a
GP
113/* descriptor check is first to prevent flooding with "callbacks suppressed" */
114#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
115do { \
116 static DEFINE_RATELIMIT_STATE(_rs, \
117 DEFAULT_RATELIMIT_INTERVAL, \
118 DEFAULT_RATELIMIT_BURST); \
119 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
120 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
121 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
122 ##__VA_ARGS__); \
123} while (0)
124#else
125__printf(2, 3) __cold
126static inline
127void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
128#endif
129
1da177e4
LT
130union ib_gid {
131 u8 raw[16];
132 struct {
97f52eb4
SH
133 __be64 subnet_prefix;
134 __be64 interface_id;
1da177e4
LT
135 } global;
136};
137
e26be1bf
MS
138extern union ib_gid zgid;
139
b39ffa1d
MB
140enum ib_gid_type {
141 /* If link layer is Ethernet, this is RoCE V1 */
142 IB_GID_TYPE_IB = 0,
143 IB_GID_TYPE_ROCE = 0,
7766a99f 144 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
b39ffa1d
MB
145 IB_GID_TYPE_SIZE
146};
147
7ead4bcb 148#define ROCE_V2_UDP_DPORT 4791
03db3a2d 149struct ib_gid_attr {
943bd984 150 struct net_device __rcu *ndev;
598ff6ba 151 struct ib_device *device;
b150c386 152 union ib_gid gid;
598ff6ba
PP
153 enum ib_gid_type gid_type;
154 u16 index;
155 u8 port_num;
03db3a2d
MB
156};
157
a0c1b2a3
EC
158enum {
159 /* set the local administered indication */
160 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
161};
162
07ebafba
TT
163enum rdma_transport_type {
164 RDMA_TRANSPORT_IB,
180771a3 165 RDMA_TRANSPORT_IWARP,
248567f7 166 RDMA_TRANSPORT_USNIC,
f95be3d2
GP
167 RDMA_TRANSPORT_USNIC_UDP,
168 RDMA_TRANSPORT_UNSPECIFIED,
07ebafba
TT
169};
170
6b90a6d6
MW
171enum rdma_protocol_type {
172 RDMA_PROTOCOL_IB,
173 RDMA_PROTOCOL_IBOE,
174 RDMA_PROTOCOL_IWARP,
175 RDMA_PROTOCOL_USNIC_UDP
176};
177
8385fd84 178__attribute_const__ enum rdma_transport_type
5d60c111 179rdma_node_get_transport(unsigned int node_type);
07ebafba 180
c865f246
SK
181enum rdma_network_type {
182 RDMA_NETWORK_IB,
183 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
184 RDMA_NETWORK_IPV4,
185 RDMA_NETWORK_IPV6
186};
187
188static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
189{
190 if (network_type == RDMA_NETWORK_IPV4 ||
191 network_type == RDMA_NETWORK_IPV6)
192 return IB_GID_TYPE_ROCE_UDP_ENCAP;
193
194 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
195 return IB_GID_TYPE_IB;
196}
197
47ec3866
PP
198static inline enum rdma_network_type
199rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
c865f246 200{
47ec3866 201 if (attr->gid_type == IB_GID_TYPE_IB)
c865f246
SK
202 return RDMA_NETWORK_IB;
203
47ec3866 204 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
c865f246
SK
205 return RDMA_NETWORK_IPV4;
206 else
207 return RDMA_NETWORK_IPV6;
208}
209
a3f5adaf
EC
210enum rdma_link_layer {
211 IB_LINK_LAYER_UNSPECIFIED,
212 IB_LINK_LAYER_INFINIBAND,
213 IB_LINK_LAYER_ETHERNET,
214};
215
1da177e4 216enum ib_device_cap_flags {
7ca0bc53
LR
217 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
218 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
219 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
220 IB_DEVICE_RAW_MULTI = (1 << 3),
221 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
222 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
223 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
224 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
225 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
78b57f95 226 /* Not in use, former INIT_TYPE = (1 << 9),*/
7ca0bc53
LR
227 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
228 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
229 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
230 IB_DEVICE_SRQ_RESIZE = (1 << 13),
231 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
b1adc714
CH
232
233 /*
234 * This device supports a per-device lkey or stag that can be
235 * used without performing a memory registration for the local
236 * memory. Note that ULPs should never check this flag, but
237 * instead of use the local_dma_lkey flag in the ib_pd structure,
238 * which will always contain a usable lkey.
239 */
7ca0bc53 240 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
78b57f95 241 /* Reserved, old SEND_W_INV = (1 << 16),*/
7ca0bc53 242 IB_DEVICE_MEM_WINDOW = (1 << 17),
e0605d91
EC
243 /*
244 * Devices should set IB_DEVICE_UD_IP_SUM if they support
245 * insertion of UDP and TCP checksum on outgoing UD IPoIB
246 * messages and can verify the validity of checksum for
247 * incoming messages. Setting this flag implies that the
248 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
249 */
7ca0bc53
LR
250 IB_DEVICE_UD_IP_CSUM = (1 << 18),
251 IB_DEVICE_UD_TSO = (1 << 19),
252 IB_DEVICE_XRC = (1 << 20),
b1adc714
CH
253
254 /*
255 * This device supports the IB "base memory management extension",
256 * which includes support for fast registrations (IB_WR_REG_MR,
257 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
258 * also be set by any iWarp device which must support FRs to comply
259 * to the iWarp verbs spec. iWarp devices also support the
260 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
261 * stag.
262 */
7ca0bc53
LR
263 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
264 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
265 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
266 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
267 IB_DEVICE_RC_IP_CSUM = (1 << 25),
ebaaee25 268 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
7ca0bc53 269 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
8a06ce59
LR
270 /*
271 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
272 * support execution of WQEs that involve synchronization
273 * of I/O operations with single completion queue managed
274 * by hardware.
275 */
78b57f95 276 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
7ca0bc53 277 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
c0a6cbb9 278 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
47355b3c 279 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
f5aa9159 280 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
c7e162a4 281 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
ebaaee25 282 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
c7e162a4 283 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
7f90a5a0 284 IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
e1d2e887
NO
285 /* The device supports padding incoming writes to cacheline. */
286 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
3856ec4b 287 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
1b01d335
SG
288};
289
1da177e4
LT
290enum ib_atomic_cap {
291 IB_ATOMIC_NONE,
292 IB_ATOMIC_HCA,
293 IB_ATOMIC_GLOB
294};
295
860f10a7 296enum ib_odp_general_cap_bits {
25bf14d6
AK
297 IB_ODP_SUPPORT = 1 << 0,
298 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
860f10a7
SG
299};
300
301enum ib_odp_transport_cap_bits {
302 IB_ODP_SUPPORT_SEND = 1 << 0,
303 IB_ODP_SUPPORT_RECV = 1 << 1,
304 IB_ODP_SUPPORT_WRITE = 1 << 2,
305 IB_ODP_SUPPORT_READ = 1 << 3,
306 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
da823342 307 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
860f10a7
SG
308};
309
310struct ib_odp_caps {
311 uint64_t general_caps;
312 struct {
313 uint32_t rc_odp_caps;
314 uint32_t uc_odp_caps;
315 uint32_t ud_odp_caps;
52a72e2a 316 uint32_t xrc_odp_caps;
860f10a7
SG
317 } per_transport_caps;
318};
319
ccf20562
YH
320struct ib_rss_caps {
321 /* Corresponding bit will be set if qp type from
322 * 'enum ib_qp_type' is supported, e.g.
323 * supported_qpts |= 1 << IB_QPT_UD
324 */
325 u32 supported_qpts;
326 u32 max_rwq_indirection_tables;
327 u32 max_rwq_indirection_table_size;
328};
329
6938fc1e 330enum ib_tm_cap_flags {
89705e92
DG
331 /* Support tag matching with rendezvous offload for RC transport */
332 IB_TM_CAP_RNDV_RC = 1 << 0,
6938fc1e
AK
333};
334
78b1beb0 335struct ib_tm_caps {
6938fc1e
AK
336 /* Max size of RNDV header */
337 u32 max_rndv_hdr_size;
338 /* Max number of entries in tag matching list */
339 u32 max_num_tags;
340 /* From enum ib_tm_cap_flags */
341 u32 flags;
342 /* Max number of outstanding list operations */
343 u32 max_ops;
344 /* Max number of SGE in tag matching entry */
345 u32 max_sge;
346};
347
bcf4c1ea
MB
348struct ib_cq_init_attr {
349 unsigned int cqe;
a9018adf 350 u32 comp_vector;
bcf4c1ea
MB
351 u32 flags;
352};
353
869ddcf8
YC
354enum ib_cq_attr_mask {
355 IB_CQ_MODERATE = 1 << 0,
356};
357
18bd9072
YC
358struct ib_cq_caps {
359 u16 max_cq_moderation_count;
360 u16 max_cq_moderation_period;
361};
362
be934cca
AL
363struct ib_dm_mr_attr {
364 u64 length;
365 u64 offset;
366 u32 access_flags;
367};
368
bee76d7a
AL
369struct ib_dm_alloc_attr {
370 u64 length;
371 u32 alignment;
372 u32 flags;
373};
374
1da177e4
LT
375struct ib_device_attr {
376 u64 fw_ver;
97f52eb4 377 __be64 sys_image_guid;
1da177e4
LT
378 u64 max_mr_size;
379 u64 page_size_cap;
380 u32 vendor_id;
381 u32 vendor_part_id;
382 u32 hw_ver;
383 int max_qp;
384 int max_qp_wr;
fb532d6a 385 u64 device_cap_flags;
33023fb8
SW
386 int max_send_sge;
387 int max_recv_sge;
1da177e4
LT
388 int max_sge_rd;
389 int max_cq;
390 int max_cqe;
391 int max_mr;
392 int max_pd;
393 int max_qp_rd_atom;
394 int max_ee_rd_atom;
395 int max_res_rd_atom;
396 int max_qp_init_rd_atom;
397 int max_ee_init_rd_atom;
398 enum ib_atomic_cap atomic_cap;
5e80ba8f 399 enum ib_atomic_cap masked_atomic_cap;
1da177e4
LT
400 int max_ee;
401 int max_rdd;
402 int max_mw;
403 int max_raw_ipv6_qp;
404 int max_raw_ethy_qp;
405 int max_mcast_grp;
406 int max_mcast_qp_attach;
407 int max_total_mcast_qp_attach;
408 int max_ah;
1da177e4
LT
409 int max_srq;
410 int max_srq_wr;
411 int max_srq_sge;
00f7ec36 412 unsigned int max_fast_reg_page_list_len;
62e3c379 413 unsigned int max_pi_fast_reg_page_list_len;
1da177e4
LT
414 u16 max_pkeys;
415 u8 local_ca_ack_delay;
1b01d335
SG
416 int sig_prot_cap;
417 int sig_guard_cap;
860f10a7 418 struct ib_odp_caps odp_caps;
24306dc6
MB
419 uint64_t timestamp_mask;
420 uint64_t hca_core_clock; /* in KHZ */
ccf20562
YH
421 struct ib_rss_caps rss_caps;
422 u32 max_wq_type_rq;
ebaaee25 423 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
78b1beb0 424 struct ib_tm_caps tm_caps;
18bd9072 425 struct ib_cq_caps cq_caps;
1d8eeb9f 426 u64 max_dm_size;
00bd1439
YF
427 /* Max entries for sgl for optimized performance per READ */
428 u32 max_sgl_rd;
1da177e4
LT
429};
430
431enum ib_mtu {
432 IB_MTU_256 = 1,
433 IB_MTU_512 = 2,
434 IB_MTU_1024 = 3,
435 IB_MTU_2048 = 4,
436 IB_MTU_4096 = 5
437};
438
6d72344c
KW
439enum opa_mtu {
440 OPA_MTU_8192 = 6,
441 OPA_MTU_10240 = 7
442};
443
1da177e4
LT
444static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
445{
446 switch (mtu) {
447 case IB_MTU_256: return 256;
448 case IB_MTU_512: return 512;
449 case IB_MTU_1024: return 1024;
450 case IB_MTU_2048: return 2048;
451 case IB_MTU_4096: return 4096;
452 default: return -1;
453 }
454}
455
d3f4aadd
AR
456static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
457{
458 if (mtu >= 4096)
459 return IB_MTU_4096;
460 else if (mtu >= 2048)
461 return IB_MTU_2048;
462 else if (mtu >= 1024)
463 return IB_MTU_1024;
464 else if (mtu >= 512)
465 return IB_MTU_512;
466 else
467 return IB_MTU_256;
468}
469
6d72344c
KW
470static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
471{
472 switch (mtu) {
473 case OPA_MTU_8192:
474 return 8192;
475 case OPA_MTU_10240:
476 return 10240;
477 default:
478 return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
479 }
480}
481
482static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
483{
484 if (mtu >= 10240)
485 return OPA_MTU_10240;
486 else if (mtu >= 8192)
487 return OPA_MTU_8192;
488 else
489 return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
490}
491
1da177e4
LT
492enum ib_port_state {
493 IB_PORT_NOP = 0,
494 IB_PORT_DOWN = 1,
495 IB_PORT_INIT = 2,
496 IB_PORT_ARMED = 3,
497 IB_PORT_ACTIVE = 4,
498 IB_PORT_ACTIVE_DEFER = 5
499};
500
72a7720f
KH
501enum ib_port_phys_state {
502 IB_PORT_PHYS_STATE_SLEEP = 1,
503 IB_PORT_PHYS_STATE_POLLING = 2,
504 IB_PORT_PHYS_STATE_DISABLED = 3,
505 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
506 IB_PORT_PHYS_STATE_LINK_UP = 5,
507 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
508 IB_PORT_PHYS_STATE_PHY_TEST = 7,
509};
510
1da177e4
LT
511enum ib_port_width {
512 IB_WIDTH_1X = 1,
dbabf685 513 IB_WIDTH_2X = 16,
1da177e4
LT
514 IB_WIDTH_4X = 2,
515 IB_WIDTH_8X = 4,
516 IB_WIDTH_12X = 8
517};
518
519static inline int ib_width_enum_to_int(enum ib_port_width width)
520{
521 switch (width) {
522 case IB_WIDTH_1X: return 1;
dbabf685 523 case IB_WIDTH_2X: return 2;
1da177e4
LT
524 case IB_WIDTH_4X: return 4;
525 case IB_WIDTH_8X: return 8;
526 case IB_WIDTH_12X: return 12;
527 default: return -1;
528 }
529}
530
2e96691c
OG
531enum ib_port_speed {
532 IB_SPEED_SDR = 1,
533 IB_SPEED_DDR = 2,
534 IB_SPEED_QDR = 4,
535 IB_SPEED_FDR10 = 8,
536 IB_SPEED_FDR = 16,
12113a35
NO
537 IB_SPEED_EDR = 32,
538 IB_SPEED_HDR = 64
2e96691c
OG
539};
540
b40f4757
CL
541/**
542 * struct rdma_hw_stats
e945130b
MB
543 * @lock - Mutex to protect parallel write access to lifespan and values
544 * of counters, which are 64bits and not guaranteeed to be written
545 * atomicaly on 32bits systems.
b40f4757
CL
546 * @timestamp - Used by the core code to track when the last update was
547 * @lifespan - Used by the core code to determine how old the counters
548 * should be before being updated again. Stored in jiffies, defaults
549 * to 10 milliseconds, drivers can override the default be specifying
550 * their own value during their allocation routine.
551 * @name - Array of pointers to static names used for the counters in
552 * directory.
553 * @num_counters - How many hardware counters there are. If name is
554 * shorter than this number, a kernel oops will result. Driver authors
555 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
556 * in their code to prevent this.
557 * @value - Array of u64 counters that are accessed by the sysfs code and
558 * filled in by the drivers get_stats routine
559 */
560struct rdma_hw_stats {
e945130b 561 struct mutex lock; /* Protect lifespan and values[] */
b40f4757
CL
562 unsigned long timestamp;
563 unsigned long lifespan;
564 const char * const *names;
565 int num_counters;
566 u64 value[];
7f624d02
SW
567};
568
b40f4757
CL
569#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
570/**
571 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
572 * for drivers.
573 * @names - Array of static const char *
574 * @num_counters - How many elements in array
575 * @lifespan - How many milliseconds between updates
576 */
577static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
578 const char * const *names, int num_counters,
579 unsigned long lifespan)
580{
581 struct rdma_hw_stats *stats;
582
583 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
584 GFP_KERNEL);
585 if (!stats)
586 return NULL;
587 stats->names = names;
588 stats->num_counters = num_counters;
589 stats->lifespan = msecs_to_jiffies(lifespan);
590
591 return stats;
592}
593
594
f9b22e35
IW
595/* Define bits for the various functionality this port needs to be supported by
596 * the core.
597 */
598/* Management 0x00000FFF */
599#define RDMA_CORE_CAP_IB_MAD 0x00000001
600#define RDMA_CORE_CAP_IB_SMI 0x00000002
601#define RDMA_CORE_CAP_IB_CM 0x00000004
602#define RDMA_CORE_CAP_IW_CM 0x00000008
603#define RDMA_CORE_CAP_IB_SA 0x00000010
65995fee 604#define RDMA_CORE_CAP_OPA_MAD 0x00000020
f9b22e35
IW
605
606/* Address format 0x000FF000 */
607#define RDMA_CORE_CAP_AF_IB 0x00001000
608#define RDMA_CORE_CAP_ETH_AH 0x00002000
94d595c5 609#define RDMA_CORE_CAP_OPA_AH 0x00004000
b02289b3 610#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
f9b22e35
IW
611
612/* Protocol 0xFFF00000 */
613#define RDMA_CORE_CAP_PROT_IB 0x00100000
614#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
615#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
7766a99f 616#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
aa773bd4 617#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
ce1e055f 618#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
f9b22e35 619
b02289b3
AK
620#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
621 | RDMA_CORE_CAP_PROT_ROCE \
622 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
623
f9b22e35
IW
624#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
625 | RDMA_CORE_CAP_IB_MAD \
626 | RDMA_CORE_CAP_IB_SMI \
627 | RDMA_CORE_CAP_IB_CM \
628 | RDMA_CORE_CAP_IB_SA \
629 | RDMA_CORE_CAP_AF_IB)
630#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
631 | RDMA_CORE_CAP_IB_MAD \
632 | RDMA_CORE_CAP_IB_CM \
f9b22e35
IW
633 | RDMA_CORE_CAP_AF_IB \
634 | RDMA_CORE_CAP_ETH_AH)
7766a99f
MB
635#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
636 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
637 | RDMA_CORE_CAP_IB_MAD \
638 | RDMA_CORE_CAP_IB_CM \
639 | RDMA_CORE_CAP_AF_IB \
640 | RDMA_CORE_CAP_ETH_AH)
f9b22e35
IW
641#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
642 | RDMA_CORE_CAP_IW_CM)
65995fee
IW
643#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
644 | RDMA_CORE_CAP_OPA_MAD)
f9b22e35 645
aa773bd4
OG
646#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
647
ce1e055f
OG
648#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
649
1da177e4 650struct ib_port_attr {
fad61ad4 651 u64 subnet_prefix;
1da177e4
LT
652 enum ib_port_state state;
653 enum ib_mtu max_mtu;
654 enum ib_mtu active_mtu;
6d72344c 655 u32 phys_mtu;
1da177e4 656 int gid_tbl_len;
2f944c0f
JG
657 unsigned int ip_gids:1;
658 /* This is the value from PortInfo CapabilityMask, defined by IBA */
1da177e4
LT
659 u32 port_cap_flags;
660 u32 max_msg_sz;
661 u32 bad_pkey_cntr;
662 u32 qkey_viol_cntr;
663 u16 pkey_tbl_len;
db58540b 664 u32 sm_lid;
582faf31 665 u32 lid;
1da177e4
LT
666 u8 lmc;
667 u8 max_vl_num;
668 u8 sm_sl;
669 u8 subnet_timeout;
670 u8 init_type_reply;
671 u8 active_width;
672 u8 active_speed;
673 u8 phys_state;
1e8f43b7 674 u16 port_cap_flags2;
1da177e4
LT
675};
676
677enum ib_device_modify_flags {
c5bcbbb9
RD
678 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
679 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
1da177e4
LT
680};
681
bd99fdea
YS
682#define IB_DEVICE_NODE_DESC_MAX 64
683
1da177e4
LT
684struct ib_device_modify {
685 u64 sys_image_guid;
bd99fdea 686 char node_desc[IB_DEVICE_NODE_DESC_MAX];
1da177e4
LT
687};
688
689enum ib_port_modify_flags {
690 IB_PORT_SHUTDOWN = 1,
691 IB_PORT_INIT_TYPE = (1<<2),
cb49366f
VN
692 IB_PORT_RESET_QKEY_CNTR = (1<<3),
693 IB_PORT_OPA_MASK_CHG = (1<<4)
1da177e4
LT
694};
695
696struct ib_port_modify {
697 u32 set_port_cap_mask;
698 u32 clr_port_cap_mask;
699 u8 init_type;
700};
701
702enum ib_event_type {
703 IB_EVENT_CQ_ERR,
704 IB_EVENT_QP_FATAL,
705 IB_EVENT_QP_REQ_ERR,
706 IB_EVENT_QP_ACCESS_ERR,
707 IB_EVENT_COMM_EST,
708 IB_EVENT_SQ_DRAINED,
709 IB_EVENT_PATH_MIG,
710 IB_EVENT_PATH_MIG_ERR,
711 IB_EVENT_DEVICE_FATAL,
712 IB_EVENT_PORT_ACTIVE,
713 IB_EVENT_PORT_ERR,
714 IB_EVENT_LID_CHANGE,
715 IB_EVENT_PKEY_CHANGE,
d41fcc67
RD
716 IB_EVENT_SM_CHANGE,
717 IB_EVENT_SRQ_ERR,
718 IB_EVENT_SRQ_LIMIT_REACHED,
63942c9a 719 IB_EVENT_QP_LAST_WQE_REACHED,
761d90ed
OG
720 IB_EVENT_CLIENT_REREGISTER,
721 IB_EVENT_GID_CHANGE,
f213c052 722 IB_EVENT_WQ_FATAL,
1da177e4
LT
723};
724
db7489e0 725const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
2b1b5b60 726
1da177e4
LT
727struct ib_event {
728 struct ib_device *device;
729 union {
730 struct ib_cq *cq;
731 struct ib_qp *qp;
d41fcc67 732 struct ib_srq *srq;
f213c052 733 struct ib_wq *wq;
1da177e4
LT
734 u8 port_num;
735 } element;
736 enum ib_event_type event;
737};
738
739struct ib_event_handler {
740 struct ib_device *device;
741 void (*handler)(struct ib_event_handler *, struct ib_event *);
742 struct list_head list;
743};
744
745#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
746 do { \
747 (_ptr)->device = _device; \
748 (_ptr)->handler = _handler; \
749 INIT_LIST_HEAD(&(_ptr)->list); \
750 } while (0)
751
752struct ib_global_route {
8d9ec9ad 753 const struct ib_gid_attr *sgid_attr;
1da177e4
LT
754 union ib_gid dgid;
755 u32 flow_label;
756 u8 sgid_index;
757 u8 hop_limit;
758 u8 traffic_class;
759};
760
513789ed 761struct ib_grh {
97f52eb4
SH
762 __be32 version_tclass_flow;
763 __be16 paylen;
513789ed
HR
764 u8 next_hdr;
765 u8 hop_limit;
766 union ib_gid sgid;
767 union ib_gid dgid;
768};
769
c865f246
SK
770union rdma_network_hdr {
771 struct ib_grh ibgrh;
772 struct {
773 /* The IB spec states that if it's IPv4, the header
774 * is located in the last 20 bytes of the header.
775 */
776 u8 reserved[20];
777 struct iphdr roce4grh;
778 };
779};
780
7dafbab3
DH
781#define IB_QPN_MASK 0xFFFFFF
782
1da177e4
LT
783enum {
784 IB_MULTICAST_QPN = 0xffffff
785};
786
f3a7c66b 787#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
b4e64397 788#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
97f52eb4 789
1da177e4
LT
790enum ib_ah_flags {
791 IB_AH_GRH = 1
792};
793
bf6a9e31
JM
794enum ib_rate {
795 IB_RATE_PORT_CURRENT = 0,
796 IB_RATE_2_5_GBPS = 2,
797 IB_RATE_5_GBPS = 5,
798 IB_RATE_10_GBPS = 3,
799 IB_RATE_20_GBPS = 6,
800 IB_RATE_30_GBPS = 4,
801 IB_RATE_40_GBPS = 7,
802 IB_RATE_60_GBPS = 8,
803 IB_RATE_80_GBPS = 9,
71eeba16
MA
804 IB_RATE_120_GBPS = 10,
805 IB_RATE_14_GBPS = 11,
806 IB_RATE_56_GBPS = 12,
807 IB_RATE_112_GBPS = 13,
808 IB_RATE_168_GBPS = 14,
809 IB_RATE_25_GBPS = 15,
810 IB_RATE_100_GBPS = 16,
811 IB_RATE_200_GBPS = 17,
a5a5d199
MG
812 IB_RATE_300_GBPS = 18,
813 IB_RATE_28_GBPS = 19,
814 IB_RATE_50_GBPS = 20,
815 IB_RATE_400_GBPS = 21,
816 IB_RATE_600_GBPS = 22,
bf6a9e31
JM
817};
818
819/**
820 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
821 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
822 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
823 * @rate: rate to convert.
824 */
8385fd84 825__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
bf6a9e31 826
71eeba16
MA
827/**
828 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
829 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
830 * @rate: rate to convert.
831 */
8385fd84 832__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
71eeba16 833
17cd3a2d
SG
834
835/**
9bee178b
SG
836 * enum ib_mr_type - memory region type
837 * @IB_MR_TYPE_MEM_REG: memory region that is used for
838 * normal registration
f5aa9159
SG
839 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
840 * register any arbitrary sg lists (without
841 * the normal mr constraints - see
842 * ib_map_mr_sg)
a0bc099a
MG
843 * @IB_MR_TYPE_DM: memory region that is used for device
844 * memory registration
845 * @IB_MR_TYPE_USER: memory region that is used for the user-space
846 * application
847 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
848 * without address translations (VA=PA)
26bc7eae
IR
849 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
850 * data integrity operations
17cd3a2d 851 */
9bee178b
SG
852enum ib_mr_type {
853 IB_MR_TYPE_MEM_REG,
f5aa9159 854 IB_MR_TYPE_SG_GAPS,
a0bc099a
MG
855 IB_MR_TYPE_DM,
856 IB_MR_TYPE_USER,
857 IB_MR_TYPE_DMA,
26bc7eae 858 IB_MR_TYPE_INTEGRITY,
17cd3a2d
SG
859};
860
1b01d335
SG
861enum ib_mr_status_check {
862 IB_MR_CHECK_SIG_STATUS = 1,
863};
864
865/**
866 * struct ib_mr_status - Memory region status container
867 *
868 * @fail_status: Bitmask of MR checks status. For each
869 * failed check a corresponding status bit is set.
870 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
871 * failure.
872 */
873struct ib_mr_status {
874 u32 fail_status;
875 struct ib_sig_err sig_err;
876};
877
bf6a9e31
JM
878/**
879 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
880 * enum.
881 * @mult: multiple to convert.
882 */
8385fd84 883__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
bf6a9e31 884
fa5d010c
MG
885struct rdma_ah_init_attr {
886 struct rdma_ah_attr *ah_attr;
887 u32 flags;
51aab126 888 struct net_device *xmit_slave;
fa5d010c
MG
889};
890
44c58487 891enum rdma_ah_attr_type {
87daac68 892 RDMA_AH_ATTR_TYPE_UNDEFINED,
44c58487
DC
893 RDMA_AH_ATTR_TYPE_IB,
894 RDMA_AH_ATTR_TYPE_ROCE,
64b4646e 895 RDMA_AH_ATTR_TYPE_OPA,
44c58487
DC
896};
897
898struct ib_ah_attr {
899 u16 dlid;
900 u8 src_path_bits;
901};
902
903struct roce_ah_attr {
904 u8 dmac[ETH_ALEN];
905};
906
64b4646e
DC
907struct opa_ah_attr {
908 u32 dlid;
909 u8 src_path_bits;
d98bb7f7 910 bool make_grd;
64b4646e
DC
911};
912
90898850 913struct rdma_ah_attr {
1da177e4 914 struct ib_global_route grh;
1da177e4 915 u8 sl;
1da177e4 916 u8 static_rate;
1da177e4 917 u8 port_num;
44c58487
DC
918 u8 ah_flags;
919 enum rdma_ah_attr_type type;
920 union {
921 struct ib_ah_attr ib;
922 struct roce_ah_attr roce;
64b4646e 923 struct opa_ah_attr opa;
44c58487 924 };
1da177e4
LT
925};
926
927enum ib_wc_status {
928 IB_WC_SUCCESS,
929 IB_WC_LOC_LEN_ERR,
930 IB_WC_LOC_QP_OP_ERR,
931 IB_WC_LOC_EEC_OP_ERR,
932 IB_WC_LOC_PROT_ERR,
933 IB_WC_WR_FLUSH_ERR,
934 IB_WC_MW_BIND_ERR,
935 IB_WC_BAD_RESP_ERR,
936 IB_WC_LOC_ACCESS_ERR,
937 IB_WC_REM_INV_REQ_ERR,
938 IB_WC_REM_ACCESS_ERR,
939 IB_WC_REM_OP_ERR,
940 IB_WC_RETRY_EXC_ERR,
941 IB_WC_RNR_RETRY_EXC_ERR,
942 IB_WC_LOC_RDD_VIOL_ERR,
943 IB_WC_REM_INV_RD_REQ_ERR,
944 IB_WC_REM_ABORT_ERR,
945 IB_WC_INV_EECN_ERR,
946 IB_WC_INV_EEC_STATE_ERR,
947 IB_WC_FATAL_ERR,
948 IB_WC_RESP_TIMEOUT_ERR,
949 IB_WC_GENERAL_ERR
950};
951
db7489e0 952const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
2b1b5b60 953
1da177e4 954enum ib_wc_opcode {
b60b9c02
BP
955 IB_WC_SEND = IB_UVERBS_WC_SEND,
956 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
957 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
958 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
959 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
960 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
961 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
962 IB_WC_LSO = IB_UVERBS_WC_TSO,
4c67e2bf 963 IB_WC_REG_MR,
5e80ba8f
VS
964 IB_WC_MASKED_COMP_SWAP,
965 IB_WC_MASKED_FETCH_ADD,
1da177e4
LT
966/*
967 * Set value of IB_WC_RECV so consumers can test if a completion is a
968 * receive by testing (opcode & IB_WC_RECV).
969 */
970 IB_WC_RECV = 1 << 7,
971 IB_WC_RECV_RDMA_WITH_IMM
972};
973
974enum ib_wc_flags {
975 IB_WC_GRH = 1,
00f7ec36
SW
976 IB_WC_WITH_IMM = (1<<1),
977 IB_WC_WITH_INVALIDATE = (1<<2),
d927d505 978 IB_WC_IP_CSUM_OK = (1<<3),
dd5f03be
MB
979 IB_WC_WITH_SMAC = (1<<4),
980 IB_WC_WITH_VLAN = (1<<5),
c865f246 981 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
1da177e4
LT
982};
983
984struct ib_wc {
14d3a3b2
CH
985 union {
986 u64 wr_id;
987 struct ib_cqe *wr_cqe;
988 };
1da177e4
LT
989 enum ib_wc_status status;
990 enum ib_wc_opcode opcode;
991 u32 vendor_err;
992 u32 byte_len;
062dbb69 993 struct ib_qp *qp;
00f7ec36
SW
994 union {
995 __be32 imm_data;
996 u32 invalidate_rkey;
997 } ex;
1da177e4 998 u32 src_qp;
cd2a6e7d 999 u32 slid;
1da177e4
LT
1000 int wc_flags;
1001 u16 pkey_index;
1da177e4
LT
1002 u8 sl;
1003 u8 dlid_path_bits;
1004 u8 port_num; /* valid only for DR SMPs on switches */
dd5f03be
MB
1005 u8 smac[ETH_ALEN];
1006 u16 vlan_id;
c865f246 1007 u8 network_hdr_type;
1da177e4
LT
1008};
1009
ed23a727
RD
1010enum ib_cq_notify_flags {
1011 IB_CQ_SOLICITED = 1 << 0,
1012 IB_CQ_NEXT_COMP = 1 << 1,
1013 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1014 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1da177e4
LT
1015};
1016
96104eda 1017enum ib_srq_type {
175ba58d
YH
1018 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1019 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1020 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
96104eda
SH
1021};
1022
1a56ff6d
AK
1023static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1024{
9c2c8496
AK
1025 return srq_type == IB_SRQT_XRC ||
1026 srq_type == IB_SRQT_TM;
1a56ff6d
AK
1027}
1028
d41fcc67
RD
1029enum ib_srq_attr_mask {
1030 IB_SRQ_MAX_WR = 1 << 0,
1031 IB_SRQ_LIMIT = 1 << 1,
1032};
1033
1034struct ib_srq_attr {
1035 u32 max_wr;
1036 u32 max_sge;
1037 u32 srq_limit;
1038};
1039
1040struct ib_srq_init_attr {
1041 void (*event_handler)(struct ib_event *, void *);
1042 void *srq_context;
1043 struct ib_srq_attr attr;
96104eda 1044 enum ib_srq_type srq_type;
418d5130 1045
1a56ff6d
AK
1046 struct {
1047 struct ib_cq *cq;
1048 union {
1049 struct {
1050 struct ib_xrcd *xrcd;
1051 } xrc;
9c2c8496
AK
1052
1053 struct {
1054 u32 max_num_tags;
1055 } tag_matching;
1a56ff6d 1056 };
418d5130 1057 } ext;
d41fcc67
RD
1058};
1059
1da177e4
LT
1060struct ib_qp_cap {
1061 u32 max_send_wr;
1062 u32 max_recv_wr;
1063 u32 max_send_sge;
1064 u32 max_recv_sge;
1065 u32 max_inline_data;
a060b562
CH
1066
1067 /*
1068 * Maximum number of rdma_rw_ctx structures in flight at a time.
1069 * ib_create_qp() will calculate the right amount of neededed WRs
1070 * and MRs based on this.
1071 */
1072 u32 max_rdma_ctxs;
1da177e4
LT
1073};
1074
1075enum ib_sig_type {
1076 IB_SIGNAL_ALL_WR,
1077 IB_SIGNAL_REQ_WR
1078};
1079
1080enum ib_qp_type {
1081 /*
1082 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1083 * here (and in that order) since the MAD layer uses them as
1084 * indices into a 2-entry table.
1085 */
1086 IB_QPT_SMI,
1087 IB_QPT_GSI,
1088
175ba58d
YH
1089 IB_QPT_RC = IB_UVERBS_QPT_RC,
1090 IB_QPT_UC = IB_UVERBS_QPT_UC,
1091 IB_QPT_UD = IB_UVERBS_QPT_UD,
1da177e4 1092 IB_QPT_RAW_IPV6,
b42b63cf 1093 IB_QPT_RAW_ETHERTYPE,
175ba58d
YH
1094 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1095 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1096 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
0134f16b 1097 IB_QPT_MAX,
175ba58d 1098 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
0134f16b
JM
1099 /* Reserve a range for qp types internal to the low level driver.
1100 * These qp types will not be visible at the IB core layer, so the
1101 * IB_QPT_MAX usages should not be affected in the core layer
1102 */
1103 IB_QPT_RESERVED1 = 0x1000,
1104 IB_QPT_RESERVED2,
1105 IB_QPT_RESERVED3,
1106 IB_QPT_RESERVED4,
1107 IB_QPT_RESERVED5,
1108 IB_QPT_RESERVED6,
1109 IB_QPT_RESERVED7,
1110 IB_QPT_RESERVED8,
1111 IB_QPT_RESERVED9,
1112 IB_QPT_RESERVED10,
1da177e4
LT
1113};
1114
b846f25a 1115enum ib_qp_create_flags {
47ee1b9f 1116 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
175ba58d
YH
1117 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1118 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
8a06ce59
LR
1119 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1120 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1121 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
90f1d1b4 1122 IB_QP_CREATE_NETIF_QP = 1 << 5,
c0a6cbb9 1123 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
7f90a5a0 1124 IB_QP_CREATE_NETDEV_USE = 1 << 7,
175ba58d
YH
1125 IB_QP_CREATE_SCATTER_FCS =
1126 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1127 IB_QP_CREATE_CVLAN_STRIPPING =
1128 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
02984cc7 1129 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
175ba58d
YH
1130 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1131 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
d2b57063
JM
1132 /* reserve bits 26-31 for low level drivers' internal use */
1133 IB_QP_CREATE_RESERVED_START = 1 << 26,
1134 IB_QP_CREATE_RESERVED_END = 1 << 31,
b846f25a
EC
1135};
1136
73c40c61
YH
1137/*
1138 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1139 * callback to destroy the passed in QP.
1140 */
1141
1da177e4 1142struct ib_qp_init_attr {
eb93c82e 1143 /* Consumer's event_handler callback must not block */
1da177e4 1144 void (*event_handler)(struct ib_event *, void *);
eb93c82e 1145
1da177e4
LT
1146 void *qp_context;
1147 struct ib_cq *send_cq;
1148 struct ib_cq *recv_cq;
1149 struct ib_srq *srq;
b42b63cf 1150 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1da177e4
LT
1151 struct ib_qp_cap cap;
1152 enum ib_sig_type sq_sig_type;
1153 enum ib_qp_type qp_type;
b56511c1 1154 u32 create_flags;
a060b562
CH
1155
1156 /*
1157 * Only needed for special QP types, or when using the RW API.
1158 */
1159 u8 port_num;
a9017e23 1160 struct ib_rwq_ind_table *rwq_ind_tbl;
02984cc7 1161 u32 source_qpn;
1da177e4
LT
1162};
1163
0e0ec7e0
SH
1164struct ib_qp_open_attr {
1165 void (*event_handler)(struct ib_event *, void *);
1166 void *qp_context;
1167 u32 qp_num;
1168 enum ib_qp_type qp_type;
1169};
1170
1da177e4
LT
1171enum ib_rnr_timeout {
1172 IB_RNR_TIMER_655_36 = 0,
1173 IB_RNR_TIMER_000_01 = 1,
1174 IB_RNR_TIMER_000_02 = 2,
1175 IB_RNR_TIMER_000_03 = 3,
1176 IB_RNR_TIMER_000_04 = 4,
1177 IB_RNR_TIMER_000_06 = 5,
1178 IB_RNR_TIMER_000_08 = 6,
1179 IB_RNR_TIMER_000_12 = 7,
1180 IB_RNR_TIMER_000_16 = 8,
1181 IB_RNR_TIMER_000_24 = 9,
1182 IB_RNR_TIMER_000_32 = 10,
1183 IB_RNR_TIMER_000_48 = 11,
1184 IB_RNR_TIMER_000_64 = 12,
1185 IB_RNR_TIMER_000_96 = 13,
1186 IB_RNR_TIMER_001_28 = 14,
1187 IB_RNR_TIMER_001_92 = 15,
1188 IB_RNR_TIMER_002_56 = 16,
1189 IB_RNR_TIMER_003_84 = 17,
1190 IB_RNR_TIMER_005_12 = 18,
1191 IB_RNR_TIMER_007_68 = 19,
1192 IB_RNR_TIMER_010_24 = 20,
1193 IB_RNR_TIMER_015_36 = 21,
1194 IB_RNR_TIMER_020_48 = 22,
1195 IB_RNR_TIMER_030_72 = 23,
1196 IB_RNR_TIMER_040_96 = 24,
1197 IB_RNR_TIMER_061_44 = 25,
1198 IB_RNR_TIMER_081_92 = 26,
1199 IB_RNR_TIMER_122_88 = 27,
1200 IB_RNR_TIMER_163_84 = 28,
1201 IB_RNR_TIMER_245_76 = 29,
1202 IB_RNR_TIMER_327_68 = 30,
1203 IB_RNR_TIMER_491_52 = 31
1204};
1205
1206enum ib_qp_attr_mask {
1207 IB_QP_STATE = 1,
1208 IB_QP_CUR_STATE = (1<<1),
1209 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1210 IB_QP_ACCESS_FLAGS = (1<<3),
1211 IB_QP_PKEY_INDEX = (1<<4),
1212 IB_QP_PORT = (1<<5),
1213 IB_QP_QKEY = (1<<6),
1214 IB_QP_AV = (1<<7),
1215 IB_QP_PATH_MTU = (1<<8),
1216 IB_QP_TIMEOUT = (1<<9),
1217 IB_QP_RETRY_CNT = (1<<10),
1218 IB_QP_RNR_RETRY = (1<<11),
1219 IB_QP_RQ_PSN = (1<<12),
1220 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1221 IB_QP_ALT_PATH = (1<<14),
1222 IB_QP_MIN_RNR_TIMER = (1<<15),
1223 IB_QP_SQ_PSN = (1<<16),
1224 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1225 IB_QP_PATH_MIG_STATE = (1<<18),
1226 IB_QP_CAP = (1<<19),
dd5f03be 1227 IB_QP_DEST_QPN = (1<<20),
aa744cc0
MB
1228 IB_QP_RESERVED1 = (1<<21),
1229 IB_QP_RESERVED2 = (1<<22),
1230 IB_QP_RESERVED3 = (1<<23),
1231 IB_QP_RESERVED4 = (1<<24),
528e5a1b 1232 IB_QP_RATE_LIMIT = (1<<25),
1da177e4
LT
1233};
1234
1235enum ib_qp_state {
1236 IB_QPS_RESET,
1237 IB_QPS_INIT,
1238 IB_QPS_RTR,
1239 IB_QPS_RTS,
1240 IB_QPS_SQD,
1241 IB_QPS_SQE,
1242 IB_QPS_ERR
1243};
1244
1245enum ib_mig_state {
1246 IB_MIG_MIGRATED,
1247 IB_MIG_REARM,
1248 IB_MIG_ARMED
1249};
1250
7083e42e
SM
1251enum ib_mw_type {
1252 IB_MW_TYPE_1 = 1,
1253 IB_MW_TYPE_2 = 2
1254};
1255
1da177e4
LT
1256struct ib_qp_attr {
1257 enum ib_qp_state qp_state;
1258 enum ib_qp_state cur_qp_state;
1259 enum ib_mtu path_mtu;
1260 enum ib_mig_state path_mig_state;
1261 u32 qkey;
1262 u32 rq_psn;
1263 u32 sq_psn;
1264 u32 dest_qp_num;
1265 int qp_access_flags;
1266 struct ib_qp_cap cap;
90898850
DC
1267 struct rdma_ah_attr ah_attr;
1268 struct rdma_ah_attr alt_ah_attr;
1da177e4
LT
1269 u16 pkey_index;
1270 u16 alt_pkey_index;
1271 u8 en_sqd_async_notify;
1272 u8 sq_draining;
1273 u8 max_rd_atomic;
1274 u8 max_dest_rd_atomic;
1275 u8 min_rnr_timer;
1276 u8 port_num;
1277 u8 timeout;
1278 u8 retry_cnt;
1279 u8 rnr_retry;
1280 u8 alt_port_num;
1281 u8 alt_timeout;
528e5a1b 1282 u32 rate_limit;
51aab126 1283 struct net_device *xmit_slave;
1da177e4
LT
1284};
1285
1286enum ib_wr_opcode {
9a59739b
JG
1287 /* These are shared with userspace */
1288 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1289 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1290 IB_WR_SEND = IB_UVERBS_WR_SEND,
1291 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1292 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1293 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1294 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
b60b9c02 1295 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
9a59739b
JG
1296 IB_WR_LSO = IB_UVERBS_WR_TSO,
1297 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1298 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1299 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1300 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1301 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1302 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1303 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1304
1305 /* These are kernel only and can not be issued by userspace */
1306 IB_WR_REG_MR = 0x20,
38ca87c6 1307 IB_WR_REG_MR_INTEGRITY,
9a59739b 1308
0134f16b
JM
1309 /* reserve values for low level drivers' internal use.
1310 * These values will not be used at all in the ib core layer.
1311 */
1312 IB_WR_RESERVED1 = 0xf0,
1313 IB_WR_RESERVED2,
1314 IB_WR_RESERVED3,
1315 IB_WR_RESERVED4,
1316 IB_WR_RESERVED5,
1317 IB_WR_RESERVED6,
1318 IB_WR_RESERVED7,
1319 IB_WR_RESERVED8,
1320 IB_WR_RESERVED9,
1321 IB_WR_RESERVED10,
1da177e4
LT
1322};
1323
1324enum ib_send_flags {
1325 IB_SEND_FENCE = 1,
1326 IB_SEND_SIGNALED = (1<<1),
1327 IB_SEND_SOLICITED = (1<<2),
e0605d91 1328 IB_SEND_INLINE = (1<<3),
0134f16b
JM
1329 IB_SEND_IP_CSUM = (1<<4),
1330
1331 /* reserve bits 26-31 for low level drivers' internal use */
1332 IB_SEND_RESERVED_START = (1 << 26),
1333 IB_SEND_RESERVED_END = (1 << 31),
1da177e4
LT
1334};
1335
1336struct ib_sge {
1337 u64 addr;
1338 u32 length;
1339 u32 lkey;
1340};
1341
14d3a3b2
CH
1342struct ib_cqe {
1343 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1344};
1345
1da177e4
LT
1346struct ib_send_wr {
1347 struct ib_send_wr *next;
14d3a3b2
CH
1348 union {
1349 u64 wr_id;
1350 struct ib_cqe *wr_cqe;
1351 };
1da177e4
LT
1352 struct ib_sge *sg_list;
1353 int num_sge;
1354 enum ib_wr_opcode opcode;
1355 int send_flags;
0f39cf3d
RD
1356 union {
1357 __be32 imm_data;
1358 u32 invalidate_rkey;
1359 } ex;
1da177e4
LT
1360};
1361
e622f2f4
CH
1362struct ib_rdma_wr {
1363 struct ib_send_wr wr;
1364 u64 remote_addr;
1365 u32 rkey;
1366};
1367
f696bf6d 1368static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
e622f2f4
CH
1369{
1370 return container_of(wr, struct ib_rdma_wr, wr);
1371}
1372
1373struct ib_atomic_wr {
1374 struct ib_send_wr wr;
1375 u64 remote_addr;
1376 u64 compare_add;
1377 u64 swap;
1378 u64 compare_add_mask;
1379 u64 swap_mask;
1380 u32 rkey;
1381};
1382
f696bf6d 1383static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
e622f2f4
CH
1384{
1385 return container_of(wr, struct ib_atomic_wr, wr);
1386}
1387
1388struct ib_ud_wr {
1389 struct ib_send_wr wr;
1390 struct ib_ah *ah;
1391 void *header;
1392 int hlen;
1393 int mss;
1394 u32 remote_qpn;
1395 u32 remote_qkey;
1396 u16 pkey_index; /* valid for GSI only */
1397 u8 port_num; /* valid for DR SMPs on switch only */
1398};
1399
f696bf6d 1400static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
e622f2f4
CH
1401{
1402 return container_of(wr, struct ib_ud_wr, wr);
1403}
1404
4c67e2bf
SG
1405struct ib_reg_wr {
1406 struct ib_send_wr wr;
1407 struct ib_mr *mr;
1408 u32 key;
1409 int access;
1410};
1411
f696bf6d 1412static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
4c67e2bf
SG
1413{
1414 return container_of(wr, struct ib_reg_wr, wr);
1415}
1416
1da177e4
LT
1417struct ib_recv_wr {
1418 struct ib_recv_wr *next;
14d3a3b2
CH
1419 union {
1420 u64 wr_id;
1421 struct ib_cqe *wr_cqe;
1422 };
1da177e4
LT
1423 struct ib_sge *sg_list;
1424 int num_sge;
1425};
1426
1427enum ib_access_flags {
4fca0377
JG
1428 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1429 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1430 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1431 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1432 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1433 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1434 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1435 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
2233c660 1436 IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
4fca0377 1437
68d384b9
MG
1438 IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1439 IB_ACCESS_SUPPORTED =
1440 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1da177e4
LT
1441};
1442
b7d3e0a9
CH
1443/*
1444 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1445 * are hidden here instead of a uapi header!
1446 */
1da177e4
LT
1447enum ib_mr_rereg_flags {
1448 IB_MR_REREG_TRANS = 1,
1449 IB_MR_REREG_PD = (1<<1),
7e6edb9b
MB
1450 IB_MR_REREG_ACCESS = (1<<2),
1451 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1da177e4
LT
1452};
1453
882214e2
HE
1454struct ib_umem;
1455
38321256 1456enum rdma_remove_reason {
1c77483e
YH
1457 /*
1458 * Userspace requested uobject deletion or initial try
1459 * to remove uobject via cleanup. Call could fail
1460 */
38321256
MB
1461 RDMA_REMOVE_DESTROY,
1462 /* Context deletion. This call should delete the actual object itself */
1463 RDMA_REMOVE_CLOSE,
1464 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1465 RDMA_REMOVE_DRIVER_REMOVE,
87ad80ab
JG
1466 /* uobj is being cleaned-up before being committed */
1467 RDMA_REMOVE_ABORT,
38321256
MB
1468};
1469
43579b5f
PP
1470struct ib_rdmacg_object {
1471#ifdef CONFIG_CGROUP_RDMA
1472 struct rdma_cgroup *cg; /* owner rdma cgroup */
1473#endif
1474};
1475
e2773c06
RD
1476struct ib_ucontext {
1477 struct ib_device *device;
771addf6 1478 struct ib_uverbs_file *ufile;
e951747a
JG
1479 /*
1480 * 'closing' can be read by the driver only during a destroy callback,
1481 * it is set when we are closing the file descriptor and indicates
1482 * that mm_sem may be locked.
1483 */
6ceb6331 1484 bool closing;
8ada2c1c 1485
1c77483e 1486 bool cleanup_retryable;
38321256 1487
43579b5f 1488 struct ib_rdmacg_object cg_obj;
60615210
LR
1489 /*
1490 * Implementation details of the RDMA core, don't use in drivers:
1491 */
1492 struct rdma_restrack_entry res;
3411f9f0 1493 struct xarray mmap_xa;
e2773c06
RD
1494};
1495
1496struct ib_uobject {
1497 u64 user_handle; /* handle given to us by userspace */
6a5e9c88
JG
1498 /* ufile & ucontext owning this object */
1499 struct ib_uverbs_file *ufile;
1500 /* FIXME, save memory: ufile->context == context */
e2773c06 1501 struct ib_ucontext *context; /* associated user context */
9ead190b 1502 void *object; /* containing object */
e2773c06 1503 struct list_head list; /* link to context's list */
43579b5f 1504 struct ib_rdmacg_object cg_obj; /* rdmacg object */
b3d636b0 1505 int id; /* index into kernel idr */
9ead190b 1506 struct kref ref;
38321256 1507 atomic_t usecnt; /* protects exclusive access */
d144da8c 1508 struct rcu_head rcu; /* kfree_rcu() overhead */
38321256 1509
6b0d08f4 1510 const struct uverbs_api_object *uapi_object;
e2773c06
RD
1511};
1512
e2773c06 1513struct ib_udata {
309243ec 1514 const void __user *inbuf;
e2773c06
RD
1515 void __user *outbuf;
1516 size_t inlen;
1517 size_t outlen;
1518};
1519
1da177e4 1520struct ib_pd {
96249d70 1521 u32 local_dma_lkey;
ed082d36 1522 u32 flags;
e2773c06
RD
1523 struct ib_device *device;
1524 struct ib_uobject *uobject;
1525 atomic_t usecnt; /* count all resources */
50d46335 1526
ed082d36
CH
1527 u32 unsafe_global_rkey;
1528
50d46335
CH
1529 /*
1530 * Implementation details of the RDMA core, don't use in drivers:
1531 */
1532 struct ib_mr *__internal_mr;
02d8883f 1533 struct rdma_restrack_entry res;
1da177e4
LT
1534};
1535
59991f94
SH
1536struct ib_xrcd {
1537 struct ib_device *device;
d3d72d90 1538 atomic_t usecnt; /* count all exposed resources */
53d0bd1e 1539 struct inode *inode;
6f3ca6f4
MG
1540 struct rw_semaphore tgt_qps_rwsem;
1541 struct xarray tgt_qps;
59991f94
SH
1542};
1543
1da177e4
LT
1544struct ib_ah {
1545 struct ib_device *device;
1546 struct ib_pd *pd;
e2773c06 1547 struct ib_uobject *uobject;
1a1f460f 1548 const struct ib_gid_attr *sgid_attr;
44c58487 1549 enum rdma_ah_attr_type type;
1da177e4
LT
1550};
1551
1552typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1553
14d3a3b2 1554enum ib_poll_context {
f794809a
JM
1555 IB_POLL_SOFTIRQ, /* poll from softirq context */
1556 IB_POLL_WORKQUEUE, /* poll from workqueue */
1557 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
c7ff819a
YF
1558 IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1559
1560 IB_POLL_DIRECT, /* caller context, no hw completions */
14d3a3b2
CH
1561};
1562
1da177e4 1563struct ib_cq {
e2773c06 1564 struct ib_device *device;
5bd48c18 1565 struct ib_ucq_object *uobject;
e2773c06
RD
1566 ib_comp_handler comp_handler;
1567 void (*event_handler)(struct ib_event *, void *);
4deccd6d 1568 void *cq_context;
e2773c06 1569 int cqe;
c7ff819a 1570 unsigned int cqe_used;
e2773c06 1571 atomic_t usecnt; /* count number of work queues */
14d3a3b2
CH
1572 enum ib_poll_context poll_ctx;
1573 struct ib_wc *wc;
c7ff819a 1574 struct list_head pool_entry;
14d3a3b2
CH
1575 union {
1576 struct irq_poll iop;
1577 struct work_struct work;
1578 };
f794809a 1579 struct workqueue_struct *comp_wq;
da662979 1580 struct dim *dim;
3e5901cb
CL
1581
1582 /* updated only by trace points */
1583 ktime_t timestamp;
3446cbd2
YF
1584 u8 interrupt:1;
1585 u8 shared:1;
c7ff819a 1586 unsigned int comp_vector;
3e5901cb 1587
02d8883f
LR
1588 /*
1589 * Implementation details of the RDMA core, don't use in drivers:
1590 */
1591 struct rdma_restrack_entry res;
1da177e4
LT
1592};
1593
1594struct ib_srq {
d41fcc67
RD
1595 struct ib_device *device;
1596 struct ib_pd *pd;
9fbe334c 1597 struct ib_usrq_object *uobject;
d41fcc67
RD
1598 void (*event_handler)(struct ib_event *, void *);
1599 void *srq_context;
96104eda 1600 enum ib_srq_type srq_type;
1da177e4 1601 atomic_t usecnt;
418d5130 1602
1a56ff6d
AK
1603 struct {
1604 struct ib_cq *cq;
1605 union {
1606 struct {
1607 struct ib_xrcd *xrcd;
1608 u32 srq_num;
1609 } xrc;
1610 };
418d5130 1611 } ext;
1da177e4
LT
1612};
1613
ebaaee25
NO
1614enum ib_raw_packet_caps {
1615 /* Strip cvlan from incoming packet and report it in the matching work
1616 * completion is supported.
1617 */
1618 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1619 /* Scatter FCS field of an incoming packet to host memory is supported.
1620 */
1621 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1622 /* Checksum offloads are supported (for both send and receive). */
1623 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
7d9336d8
MG
1624 /* When a packet is received for an RQ with no receive WQEs, the
1625 * packet processing is delayed.
1626 */
1627 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
ebaaee25
NO
1628};
1629
5fd251c8 1630enum ib_wq_type {
175ba58d 1631 IB_WQT_RQ = IB_UVERBS_WQT_RQ,
5fd251c8
YH
1632};
1633
1634enum ib_wq_state {
1635 IB_WQS_RESET,
1636 IB_WQS_RDY,
1637 IB_WQS_ERR
1638};
1639
1640struct ib_wq {
1641 struct ib_device *device;
e04dd131 1642 struct ib_uwq_object *uobject;
5fd251c8
YH
1643 void *wq_context;
1644 void (*event_handler)(struct ib_event *, void *);
1645 struct ib_pd *pd;
1646 struct ib_cq *cq;
1647 u32 wq_num;
1648 enum ib_wq_state state;
1649 enum ib_wq_type wq_type;
1650 atomic_t usecnt;
1651};
1652
10bac72b 1653enum ib_wq_flags {
175ba58d
YH
1654 IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1655 IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1656 IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1657 IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1658 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
10bac72b
NO
1659};
1660
5fd251c8
YH
1661struct ib_wq_init_attr {
1662 void *wq_context;
1663 enum ib_wq_type wq_type;
1664 u32 max_wr;
1665 u32 max_sge;
1666 struct ib_cq *cq;
1667 void (*event_handler)(struct ib_event *, void *);
10bac72b 1668 u32 create_flags; /* Use enum ib_wq_flags */
5fd251c8
YH
1669};
1670
1671enum ib_wq_attr_mask {
10bac72b
NO
1672 IB_WQ_STATE = 1 << 0,
1673 IB_WQ_CUR_STATE = 1 << 1,
1674 IB_WQ_FLAGS = 1 << 2,
5fd251c8
YH
1675};
1676
1677struct ib_wq_attr {
1678 enum ib_wq_state wq_state;
1679 enum ib_wq_state curr_wq_state;
10bac72b
NO
1680 u32 flags; /* Use enum ib_wq_flags */
1681 u32 flags_mask; /* Use enum ib_wq_flags */
5fd251c8
YH
1682};
1683
6d39786b
YH
1684struct ib_rwq_ind_table {
1685 struct ib_device *device;
1686 struct ib_uobject *uobject;
1687 atomic_t usecnt;
1688 u32 ind_tbl_num;
1689 u32 log_ind_tbl_size;
1690 struct ib_wq **ind_tbl;
1691};
1692
1693struct ib_rwq_ind_table_init_attr {
1694 u32 log_ind_tbl_size;
1695 /* Each entry is a pointer to Receive Work Queue */
1696 struct ib_wq **ind_tbl;
1697};
1698
d291f1a6
DJ
1699enum port_pkey_state {
1700 IB_PORT_PKEY_NOT_VALID = 0,
1701 IB_PORT_PKEY_VALID = 1,
1702 IB_PORT_PKEY_LISTED = 2,
1703};
1704
1705struct ib_qp_security;
1706
1707struct ib_port_pkey {
1708 enum port_pkey_state state;
1709 u16 pkey_index;
1710 u8 port_num;
1711 struct list_head qp_list;
1712 struct list_head to_error_list;
1713 struct ib_qp_security *sec;
1714};
1715
1716struct ib_ports_pkeys {
1717 struct ib_port_pkey main;
1718 struct ib_port_pkey alt;
1719};
1720
1721struct ib_qp_security {
1722 struct ib_qp *qp;
1723 struct ib_device *dev;
1724 /* Hold this mutex when changing port and pkey settings. */
1725 struct mutex mutex;
1726 struct ib_ports_pkeys *ports_pkeys;
1727 /* A list of all open shared QP handles. Required to enforce security
1728 * properly for all users of a shared QP.
1729 */
1730 struct list_head shared_qp_list;
1731 void *security;
1732 bool destroying;
1733 atomic_t error_list_count;
1734 struct completion error_complete;
1735 int error_comps_pending;
1736};
1737
632bc3f6
BVA
1738/*
1739 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1740 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1741 */
1da177e4
LT
1742struct ib_qp {
1743 struct ib_device *device;
1744 struct ib_pd *pd;
1745 struct ib_cq *send_cq;
1746 struct ib_cq *recv_cq;
fffb0383
CH
1747 spinlock_t mr_lock;
1748 int mrs_used;
a060b562 1749 struct list_head rdma_mrs;
0e353e34 1750 struct list_head sig_mrs;
1da177e4 1751 struct ib_srq *srq;
b42b63cf 1752 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
d3d72d90 1753 struct list_head xrcd_list;
fffb0383 1754
319a441d
HHZ
1755 /* count times opened, mcast attaches, flow attaches */
1756 atomic_t usecnt;
0e0ec7e0
SH
1757 struct list_head open_list;
1758 struct ib_qp *real_qp;
620d3f81 1759 struct ib_uqp_object *uobject;
1da177e4
LT
1760 void (*event_handler)(struct ib_event *, void *);
1761 void *qp_context;
1a1f460f
JG
1762 /* sgid_attrs associated with the AV's */
1763 const struct ib_gid_attr *av_sgid_attr;
1764 const struct ib_gid_attr *alt_path_sgid_attr;
1da177e4 1765 u32 qp_num;
632bc3f6
BVA
1766 u32 max_write_sge;
1767 u32 max_read_sge;
1da177e4 1768 enum ib_qp_type qp_type;
a9017e23 1769 struct ib_rwq_ind_table *rwq_ind_tbl;
d291f1a6 1770 struct ib_qp_security *qp_sec;
498ca3c8 1771 u8 port;
02d8883f 1772
185eddc4 1773 bool integrity_en;
02d8883f
LR
1774 /*
1775 * Implementation details of the RDMA core, don't use in drivers:
1776 */
1777 struct rdma_restrack_entry res;
99fa331d
MZ
1778
1779 /* The counter the qp is bind to */
1780 struct rdma_counter *counter;
1da177e4
LT
1781};
1782
bee76d7a
AL
1783struct ib_dm {
1784 struct ib_device *device;
1785 u32 length;
1786 u32 flags;
1787 struct ib_uobject *uobject;
1788 atomic_t usecnt;
1789};
1790
1da177e4 1791struct ib_mr {
e2773c06
RD
1792 struct ib_device *device;
1793 struct ib_pd *pd;
e2773c06
RD
1794 u32 lkey;
1795 u32 rkey;
4c67e2bf 1796 u64 iova;
edd31551 1797 u64 length;
4c67e2bf 1798 unsigned int page_size;
a0bc099a 1799 enum ib_mr_type type;
d4a85c30 1800 bool need_inval;
fffb0383
CH
1801 union {
1802 struct ib_uobject *uobject; /* user */
1803 struct list_head qp_entry; /* FR */
1804 };
fccec5b8 1805
be934cca 1806 struct ib_dm *dm;
7c717d3a 1807 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
fccec5b8
SW
1808 /*
1809 * Implementation details of the RDMA core, don't use in drivers:
1810 */
1811 struct rdma_restrack_entry res;
1da177e4
LT
1812};
1813
1814struct ib_mw {
1815 struct ib_device *device;
1816 struct ib_pd *pd;
e2773c06 1817 struct ib_uobject *uobject;
1da177e4 1818 u32 rkey;
7083e42e 1819 enum ib_mw_type type;
1da177e4
LT
1820};
1821
319a441d
HHZ
1822/* Supported steering options */
1823enum ib_flow_attr_type {
1824 /* steering according to rule specifications */
1825 IB_FLOW_ATTR_NORMAL = 0x0,
1826 /* default unicast and multicast rule -
1827 * receive all Eth traffic which isn't steered to any QP
1828 */
1829 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1830 /* default multicast rule -
1831 * receive all Eth multicast traffic which isn't steered to any QP
1832 */
1833 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1834 /* sniffer rule - receive all port traffic */
1835 IB_FLOW_ATTR_SNIFFER = 0x3
1836};
1837
1838/* Supported steering header types */
1839enum ib_flow_spec_type {
1840 /* L2 headers*/
76bd23b3
MR
1841 IB_FLOW_SPEC_ETH = 0x20,
1842 IB_FLOW_SPEC_IB = 0x22,
319a441d 1843 /* L3 header*/
76bd23b3
MR
1844 IB_FLOW_SPEC_IPV4 = 0x30,
1845 IB_FLOW_SPEC_IPV6 = 0x31,
56ab0b38 1846 IB_FLOW_SPEC_ESP = 0x34,
319a441d 1847 /* L4 headers*/
76bd23b3
MR
1848 IB_FLOW_SPEC_TCP = 0x40,
1849 IB_FLOW_SPEC_UDP = 0x41,
0dbf3332 1850 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
d90e5e50 1851 IB_FLOW_SPEC_GRE = 0x51,
b04f0f03 1852 IB_FLOW_SPEC_MPLS = 0x60,
fbf46860 1853 IB_FLOW_SPEC_INNER = 0x100,
460d0198
MR
1854 /* Actions */
1855 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
483a3966 1856 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
9b828441 1857 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
7eea23a5 1858 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
319a441d 1859};
240ae00e 1860#define IB_FLOW_SPEC_LAYER_MASK 0xF0
7eea23a5 1861#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
22878dbc 1862
a3100a78
MV
1863enum ib_flow_flags {
1864 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
21e82d3e
BP
1865 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1866 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
a3100a78
MV
1867};
1868
319a441d
HHZ
1869struct ib_flow_eth_filter {
1870 u8 dst_mac[6];
1871 u8 src_mac[6];
1872 __be16 ether_type;
1873 __be16 vlan_tag;
15dfbd6b 1874 /* Must be last */
5b361328 1875 u8 real_sz[];
319a441d
HHZ
1876};
1877
1878struct ib_flow_spec_eth {
fbf46860 1879 u32 type;
319a441d
HHZ
1880 u16 size;
1881 struct ib_flow_eth_filter val;
1882 struct ib_flow_eth_filter mask;
1883};
1884
240ae00e
MB
1885struct ib_flow_ib_filter {
1886 __be16 dlid;
1887 __u8 sl;
15dfbd6b 1888 /* Must be last */
5b361328 1889 u8 real_sz[];
240ae00e
MB
1890};
1891
1892struct ib_flow_spec_ib {
fbf46860 1893 u32 type;
240ae00e
MB
1894 u16 size;
1895 struct ib_flow_ib_filter val;
1896 struct ib_flow_ib_filter mask;
1897};
1898
989a3a8f
MG
1899/* IPv4 header flags */
1900enum ib_ipv4_flags {
1901 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1902 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the
1903 last have this flag set */
1904};
1905
319a441d
HHZ
1906struct ib_flow_ipv4_filter {
1907 __be32 src_ip;
1908 __be32 dst_ip;
989a3a8f
MG
1909 u8 proto;
1910 u8 tos;
1911 u8 ttl;
1912 u8 flags;
15dfbd6b 1913 /* Must be last */
5b361328 1914 u8 real_sz[];
319a441d
HHZ
1915};
1916
1917struct ib_flow_spec_ipv4 {
fbf46860 1918 u32 type;
319a441d
HHZ
1919 u16 size;
1920 struct ib_flow_ipv4_filter val;
1921 struct ib_flow_ipv4_filter mask;
1922};
1923
4c2aae71
MG
1924struct ib_flow_ipv6_filter {
1925 u8 src_ip[16];
1926 u8 dst_ip[16];
a72c6a2b
MG
1927 __be32 flow_label;
1928 u8 next_hdr;
1929 u8 traffic_class;
1930 u8 hop_limit;
15dfbd6b 1931 /* Must be last */
5b361328 1932 u8 real_sz[];
4c2aae71
MG
1933};
1934
1935struct ib_flow_spec_ipv6 {
fbf46860 1936 u32 type;
4c2aae71
MG
1937 u16 size;
1938 struct ib_flow_ipv6_filter val;
1939 struct ib_flow_ipv6_filter mask;
1940};
1941
319a441d
HHZ
1942struct ib_flow_tcp_udp_filter {
1943 __be16 dst_port;
1944 __be16 src_port;
15dfbd6b 1945 /* Must be last */
5b361328 1946 u8 real_sz[];
319a441d
HHZ
1947};
1948
1949struct ib_flow_spec_tcp_udp {
fbf46860 1950 u32 type;
319a441d
HHZ
1951 u16 size;
1952 struct ib_flow_tcp_udp_filter val;
1953 struct ib_flow_tcp_udp_filter mask;
1954};
1955
0dbf3332
MR
1956struct ib_flow_tunnel_filter {
1957 __be32 tunnel_id;
5b361328 1958 u8 real_sz[];
0dbf3332
MR
1959};
1960
1961/* ib_flow_spec_tunnel describes the Vxlan tunnel
1962 * the tunnel_id from val has the vni value
1963 */
1964struct ib_flow_spec_tunnel {
fbf46860 1965 u32 type;
0dbf3332
MR
1966 u16 size;
1967 struct ib_flow_tunnel_filter val;
1968 struct ib_flow_tunnel_filter mask;
1969};
1970
56ab0b38
MB
1971struct ib_flow_esp_filter {
1972 __be32 spi;
1973 __be32 seq;
1974 /* Must be last */
5b361328 1975 u8 real_sz[];
56ab0b38
MB
1976};
1977
1978struct ib_flow_spec_esp {
1979 u32 type;
1980 u16 size;
1981 struct ib_flow_esp_filter val;
1982 struct ib_flow_esp_filter mask;
1983};
1984
d90e5e50
AL
1985struct ib_flow_gre_filter {
1986 __be16 c_ks_res0_ver;
1987 __be16 protocol;
1988 __be32 key;
1989 /* Must be last */
5b361328 1990 u8 real_sz[];
d90e5e50
AL
1991};
1992
1993struct ib_flow_spec_gre {
1994 u32 type;
1995 u16 size;
1996 struct ib_flow_gre_filter val;
1997 struct ib_flow_gre_filter mask;
1998};
1999
b04f0f03
AL
2000struct ib_flow_mpls_filter {
2001 __be32 tag;
2002 /* Must be last */
5b361328 2003 u8 real_sz[];
b04f0f03
AL
2004};
2005
2006struct ib_flow_spec_mpls {
2007 u32 type;
2008 u16 size;
2009 struct ib_flow_mpls_filter val;
2010 struct ib_flow_mpls_filter mask;
2011};
2012
460d0198
MR
2013struct ib_flow_spec_action_tag {
2014 enum ib_flow_spec_type type;
2015 u16 size;
2016 u32 tag_id;
2017};
2018
483a3966
SS
2019struct ib_flow_spec_action_drop {
2020 enum ib_flow_spec_type type;
2021 u16 size;
2022};
2023
9b828441
MB
2024struct ib_flow_spec_action_handle {
2025 enum ib_flow_spec_type type;
2026 u16 size;
2027 struct ib_flow_action *act;
2028};
2029
7eea23a5
RS
2030enum ib_counters_description {
2031 IB_COUNTER_PACKETS,
2032 IB_COUNTER_BYTES,
2033};
2034
2035struct ib_flow_spec_action_count {
2036 enum ib_flow_spec_type type;
2037 u16 size;
2038 struct ib_counters *counters;
2039};
2040
319a441d
HHZ
2041union ib_flow_spec {
2042 struct {
fbf46860 2043 u32 type;
319a441d
HHZ
2044 u16 size;
2045 };
2046 struct ib_flow_spec_eth eth;
240ae00e 2047 struct ib_flow_spec_ib ib;
319a441d
HHZ
2048 struct ib_flow_spec_ipv4 ipv4;
2049 struct ib_flow_spec_tcp_udp tcp_udp;
4c2aae71 2050 struct ib_flow_spec_ipv6 ipv6;
0dbf3332 2051 struct ib_flow_spec_tunnel tunnel;
56ab0b38 2052 struct ib_flow_spec_esp esp;
d90e5e50 2053 struct ib_flow_spec_gre gre;
b04f0f03 2054 struct ib_flow_spec_mpls mpls;
460d0198 2055 struct ib_flow_spec_action_tag flow_tag;
483a3966 2056 struct ib_flow_spec_action_drop drop;
9b828441 2057 struct ib_flow_spec_action_handle action;
7eea23a5 2058 struct ib_flow_spec_action_count flow_count;
319a441d
HHZ
2059};
2060
2061struct ib_flow_attr {
2062 enum ib_flow_attr_type type;
2063 u16 size;
2064 u16 priority;
2065 u32 flags;
2066 u8 num_of_specs;
2067 u8 port;
7654cb1b 2068 union ib_flow_spec flows[];
319a441d
HHZ
2069};
2070
2071struct ib_flow {
2072 struct ib_qp *qp;
6cd080a6 2073 struct ib_device *device;
319a441d
HHZ
2074 struct ib_uobject *uobject;
2075};
2076
2eb9beae
MB
2077enum ib_flow_action_type {
2078 IB_FLOW_ACTION_UNSPECIFIED,
2079 IB_FLOW_ACTION_ESP = 1,
2080};
2081
2082struct ib_flow_action_attrs_esp_keymats {
2083 enum ib_uverbs_flow_action_esp_keymat protocol;
2084 union {
2085 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2086 } keymat;
2087};
2088
2089struct ib_flow_action_attrs_esp_replays {
2090 enum ib_uverbs_flow_action_esp_replay protocol;
2091 union {
2092 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2093 } replay;
2094};
2095
2096enum ib_flow_action_attrs_esp_flags {
2097 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2098 * This is done in order to share the same flags between user-space and
2099 * kernel and spare an unnecessary translation.
2100 */
2101
2102 /* Kernel flags */
2103 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
7d12f8d5 2104 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2eb9beae
MB
2105};
2106
2107struct ib_flow_spec_list {
2108 struct ib_flow_spec_list *next;
2109 union ib_flow_spec spec;
2110};
2111
2112struct ib_flow_action_attrs_esp {
2113 struct ib_flow_action_attrs_esp_keymats *keymat;
2114 struct ib_flow_action_attrs_esp_replays *replay;
2115 struct ib_flow_spec_list *encap;
2116 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2117 * Value of 0 is a valid value.
2118 */
2119 u32 esn;
2120 u32 spi;
2121 u32 seq;
2122 u32 tfc_pad;
2123 /* Use enum ib_flow_action_attrs_esp_flags */
2124 u64 flags;
2125 u64 hard_limit_pkts;
2126};
2127
2128struct ib_flow_action {
2129 struct ib_device *device;
2130 struct ib_uobject *uobject;
2131 enum ib_flow_action_type type;
2132 atomic_t usecnt;
2133};
2134
e26e7b88 2135struct ib_mad;
1da177e4
LT
2136struct ib_grh;
2137
2138enum ib_process_mad_flags {
2139 IB_MAD_IGNORE_MKEY = 1,
2140 IB_MAD_IGNORE_BKEY = 2,
2141 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2142};
2143
2144enum ib_mad_result {
2145 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2146 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2147 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2148 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2149};
2150
21d6454a 2151struct ib_port_cache {
883c71fe 2152 u64 subnet_prefix;
21d6454a
JW
2153 struct ib_pkey_cache *pkey;
2154 struct ib_gid_table *gid;
2155 u8 lmc;
2156 enum ib_port_state port_state;
2157};
2158
7738613e
IW
2159struct ib_port_immutable {
2160 int pkey_tbl_len;
2161 int gid_tbl_len;
f9b22e35 2162 u32 core_cap_flags;
337877a4 2163 u32 max_mad_size;
7738613e
IW
2164};
2165
8ceb1357 2166struct ib_port_data {
324e227e
JG
2167 struct ib_device *ib_dev;
2168
8ceb1357
JG
2169 struct ib_port_immutable immutable;
2170
2171 spinlock_t pkey_list_lock;
2172 struct list_head pkey_list;
8faea9fd
JG
2173
2174 struct ib_port_cache cache;
c2261dd7
JG
2175
2176 spinlock_t netdev_lock;
324e227e
JG
2177 struct net_device __rcu *netdev;
2178 struct hlist_node ndev_hash_link;
413d3347 2179 struct rdma_port_counter port_counter;
6e7be47a 2180 struct rdma_hw_stats *hw_stats;
8ceb1357
JG
2181};
2182
2fc77572
VN
2183/* rdma netdev type - specifies protocol type */
2184enum rdma_netdev_t {
f0ad83ac
NV
2185 RDMA_NETDEV_OPA_VNIC,
2186 RDMA_NETDEV_IPOIB,
2fc77572
VN
2187};
2188
2189/**
2190 * struct rdma_netdev - rdma netdev
2191 * For cases where netstack interfacing is required.
2192 */
2193struct rdma_netdev {
2194 void *clnt_priv;
2195 struct ib_device *hca;
2196 u8 port_num;
d99dc602 2197 int mtu;
2fc77572 2198
9f49a5b5
JG
2199 /*
2200 * cleanup function must be specified.
2201 * FIXME: This is only used for OPA_VNIC and that usage should be
2202 * removed too.
2203 */
8e959601
NV
2204 void (*free_rdma_netdev)(struct net_device *netdev);
2205
2fc77572
VN
2206 /* control functions */
2207 void (*set_id)(struct net_device *netdev, int id);
f0ad83ac
NV
2208 /* send packet */
2209 int (*send)(struct net_device *dev, struct sk_buff *skb,
2210 struct ib_ah *address, u32 dqpn);
2211 /* multicast */
2212 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2213 union ib_gid *gid, u16 mlid,
2214 int set_qkey, u32 qkey);
2215 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2216 union ib_gid *gid, u16 mlid);
2fc77572
VN
2217};
2218
f6a8a19b
DD
2219struct rdma_netdev_alloc_params {
2220 size_t sizeof_priv;
2221 unsigned int txqs;
2222 unsigned int rxqs;
2223 void *param;
2224
2225 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2226 struct net_device *netdev, void *param);
2227};
2228
a3de94e3
EA
2229struct ib_odp_counters {
2230 atomic64_t faults;
2231 atomic64_t invalidations;
d473f4dc 2232 atomic64_t prefetch;
a3de94e3
EA
2233};
2234
fa9b1802
RS
2235struct ib_counters {
2236 struct ib_device *device;
2237 struct ib_uobject *uobject;
2238 /* num of objects attached */
2239 atomic_t usecnt;
2240};
2241
51d7a538
RS
2242struct ib_counters_read_attr {
2243 u64 *counters_buff;
2244 u32 ncounters;
2245 u32 flags; /* use enum ib_read_counters_flags */
2246};
2247
2eb9beae 2248struct uverbs_attr_bundle;
dd05cb82
KH
2249struct iw_cm_id;
2250struct iw_cm_conn_param;
2eb9beae 2251
30471d4b
LR
2252#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2253 .size_##ib_struct = \
2254 (sizeof(struct drv_struct) + \
2255 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2256 BUILD_BUG_ON_ZERO( \
2257 !__same_type(((struct drv_struct *)NULL)->member, \
2258 struct ib_struct)))
2259
f6316032
LR
2260#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2261 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2262
30471d4b 2263#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
f6316032 2264 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
30471d4b
LR
2265
2266#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2267
3411f9f0
MK
2268struct rdma_user_mmap_entry {
2269 struct kref ref;
2270 struct ib_ucontext *ucontext;
2271 unsigned long start_pgoff;
2272 size_t npages;
2273 bool driver_removed;
2274};
2275
2276/* Return the offset (in bytes) the user should pass to libc's mmap() */
2277static inline u64
2278rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2279{
2280 return (u64)entry->start_pgoff << PAGE_SHIFT;
2281}
2282
521ed0d9
KH
2283/**
2284 * struct ib_device_ops - InfiniBand device operations
2285 * This structure defines all the InfiniBand device operations, providers will
2286 * need to define the supported operations, otherwise they will be set to null.
2287 */
2288struct ib_device_ops {
7a154142 2289 struct module *owner;
b9560a41 2290 enum rdma_driver_id driver_id;
72c6ec18 2291 u32 uverbs_abi_ver;
8f71bb00 2292 unsigned int uverbs_no_driver_id_binding:1;
b9560a41 2293
521ed0d9
KH
2294 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2295 const struct ib_send_wr **bad_send_wr);
2296 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2297 const struct ib_recv_wr **bad_recv_wr);
2298 void (*drain_rq)(struct ib_qp *qp);
2299 void (*drain_sq)(struct ib_qp *qp);
2300 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2301 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2302 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2303 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2304 int (*post_srq_recv)(struct ib_srq *srq,
2305 const struct ib_recv_wr *recv_wr,
2306 const struct ib_recv_wr **bad_recv_wr);
2307 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2308 u8 port_num, const struct ib_wc *in_wc,
2309 const struct ib_grh *in_grh,
e26e7b88
LR
2310 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2311 size_t *out_mad_size, u16 *out_mad_pkey_index);
521ed0d9
KH
2312 int (*query_device)(struct ib_device *device,
2313 struct ib_device_attr *device_attr,
2314 struct ib_udata *udata);
2315 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2316 struct ib_device_modify *device_modify);
2317 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2318 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2319 int comp_vector);
2320 int (*query_port)(struct ib_device *device, u8 port_num,
2321 struct ib_port_attr *port_attr);
2322 int (*modify_port)(struct ib_device *device, u8 port_num,
2323 int port_modify_mask,
2324 struct ib_port_modify *port_modify);
2325 /**
2326 * The following mandatory functions are used only at device
2327 * registration. Keep functions such as these at the end of this
2328 * structure to avoid cache line misses when accessing struct ib_device
2329 * in fast paths.
2330 */
2331 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2332 struct ib_port_immutable *immutable);
2333 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2334 u8 port_num);
2335 /**
2336 * When calling get_netdev, the HW vendor's driver should return the
2337 * net device of device @device at port @port_num or NULL if such
2338 * a net device doesn't exist. The vendor driver should call dev_hold
2339 * on this net device. The HW vendor's device driver must guarantee
2340 * that this function returns NULL before the net device has finished
2341 * NETDEV_UNREGISTER state.
2342 */
2343 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2344 /**
2345 * rdma netdev operation
2346 *
2347 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2348 * must return -EOPNOTSUPP if it doesn't support the specified type.
2349 */
2350 struct net_device *(*alloc_rdma_netdev)(
2351 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2352 const char *name, unsigned char name_assign_type,
2353 void (*setup)(struct net_device *));
2354
2355 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2356 enum rdma_netdev_t type,
2357 struct rdma_netdev_alloc_params *params);
2358 /**
2359 * query_gid should be return GID value for @device, when @port_num
2360 * link layer is either IB or iWarp. It is no-op if @port_num port
2361 * is RoCE link layer.
2362 */
2363 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2364 union ib_gid *gid);
2365 /**
2366 * When calling add_gid, the HW vendor's driver should add the gid
2367 * of device of port at gid index available at @attr. Meta-info of
2368 * that gid (for example, the network device related to this gid) is
2369 * available at @attr. @context allows the HW vendor driver to store
2370 * extra information together with a GID entry. The HW vendor driver may
2371 * allocate memory to contain this information and store it in @context
2372 * when a new GID entry is written to. Params are consistent until the
2373 * next call of add_gid or delete_gid. The function should return 0 on
2374 * success or error otherwise. The function could be called
2375 * concurrently for different ports. This function is only called when
2376 * roce_gid_table is used.
2377 */
2378 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2379 /**
2380 * When calling del_gid, the HW vendor's driver should delete the
2381 * gid of device @device at gid index gid_index of port port_num
2382 * available in @attr.
2383 * Upon the deletion of a GID entry, the HW vendor must free any
2384 * allocated memory. The caller will clear @context afterwards.
2385 * This function is only called when roce_gid_table is used.
2386 */
2387 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2388 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2389 u16 *pkey);
a2a074ef
LR
2390 int (*alloc_ucontext)(struct ib_ucontext *context,
2391 struct ib_udata *udata);
2392 void (*dealloc_ucontext)(struct ib_ucontext *context);
521ed0d9 2393 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
3411f9f0
MK
2394 /**
2395 * This will be called once refcount of an entry in mmap_xa reaches
2396 * zero. The type of the memory that was mapped may differ between
2397 * entries and is opaque to the rdma_user_mmap interface.
2398 * Therefore needs to be implemented by the driver in mmap_free.
2399 */
2400 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
521ed0d9 2401 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
ff23dfa1 2402 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
91a7c58f 2403 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
fa5d010c
MG
2404 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2405 struct ib_udata *udata);
521ed0d9
KH
2406 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2407 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
9a9ebf8c 2408 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
68e326de
LR
2409 int (*create_srq)(struct ib_srq *srq,
2410 struct ib_srq_init_attr *srq_init_attr,
2411 struct ib_udata *udata);
521ed0d9
KH
2412 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2413 enum ib_srq_attr_mask srq_attr_mask,
2414 struct ib_udata *udata);
2415 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
119181d1 2416 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
521ed0d9
KH
2417 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2418 struct ib_qp_init_attr *qp_init_attr,
2419 struct ib_udata *udata);
2420 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2421 int qp_attr_mask, struct ib_udata *udata);
2422 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2423 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
c4367a26 2424 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
e39afe3d
LR
2425 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2426 struct ib_udata *udata);
521ed0d9 2427 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
43d781b9 2428 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
521ed0d9
KH
2429 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2430 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2431 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2432 u64 virt_addr, int mr_access_flags,
2433 struct ib_udata *udata);
2434 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2435 u64 virt_addr, int mr_access_flags,
2436 struct ib_pd *pd, struct ib_udata *udata);
c4367a26 2437 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
521ed0d9 2438 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
42a3b153 2439 u32 max_num_sg);
26bc7eae
IR
2440 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2441 u32 max_num_data_sg,
2442 u32 max_num_meta_sg);
ad8a4496
MS
2443 int (*advise_mr)(struct ib_pd *pd,
2444 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2445 struct ib_sge *sg_list, u32 num_sge,
2446 struct uverbs_attr_bundle *attrs);
521ed0d9
KH
2447 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2448 unsigned int *sg_offset);
2449 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2450 struct ib_mr_status *mr_status);
2451 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2452 struct ib_udata *udata);
2453 int (*dealloc_mw)(struct ib_mw *mw);
521ed0d9
KH
2454 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2455 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
28ad5f65 2456 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
d0c45c85 2457 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
521ed0d9
KH
2458 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2459 struct ib_flow_attr *flow_attr,
d6673746 2460 struct ib_udata *udata);
521ed0d9
KH
2461 int (*destroy_flow)(struct ib_flow *flow_id);
2462 struct ib_flow_action *(*create_flow_action_esp)(
2463 struct ib_device *device,
2464 const struct ib_flow_action_attrs_esp *attr,
2465 struct uverbs_attr_bundle *attrs);
2466 int (*destroy_flow_action)(struct ib_flow_action *action);
2467 int (*modify_flow_action_esp)(
2468 struct ib_flow_action *action,
2469 const struct ib_flow_action_attrs_esp *attr,
2470 struct uverbs_attr_bundle *attrs);
2471 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2472 int state);
2473 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2474 struct ifla_vf_info *ivf);
2475 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2476 struct ifla_vf_stats *stats);
bfcb3c5d
DG
2477 int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2478 struct ifla_vf_guid *node_guid,
2479 struct ifla_vf_guid *port_guid);
521ed0d9
KH
2480 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2481 int type);
2482 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2483 struct ib_wq_init_attr *init_attr,
2484 struct ib_udata *udata);
add53535 2485 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
521ed0d9
KH
2486 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2487 u32 wq_attr_mask, struct ib_udata *udata);
2488 struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2489 struct ib_device *device,
2490 struct ib_rwq_ind_table_init_attr *init_attr,
2491 struct ib_udata *udata);
2492 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2493 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2494 struct ib_ucontext *context,
2495 struct ib_dm_alloc_attr *attr,
2496 struct uverbs_attr_bundle *attrs);
c4367a26 2497 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
521ed0d9
KH
2498 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2499 struct ib_dm_mr_attr *attr,
2500 struct uverbs_attr_bundle *attrs);
3b023e1b
LR
2501 int (*create_counters)(struct ib_counters *counters,
2502 struct uverbs_attr_bundle *attrs);
71ff3f62 2503 int (*destroy_counters)(struct ib_counters *counters);
521ed0d9
KH
2504 int (*read_counters)(struct ib_counters *counters,
2505 struct ib_counters_read_attr *counters_read_attr,
2506 struct uverbs_attr_bundle *attrs);
2cdfcdd8
MG
2507 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2508 int data_sg_nents, unsigned int *data_sg_offset,
2509 struct scatterlist *meta_sg, int meta_sg_nents,
2510 unsigned int *meta_sg_offset);
2511
521ed0d9
KH
2512 /**
2513 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2514 * driver initialized data. The struct is kfree()'ed by the sysfs
2515 * core when the device is removed. A lifespan of -1 in the return
2516 * struct tells the core to set a default lifespan.
2517 */
2518 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2519 u8 port_num);
2520 /**
2521 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2522 * @index - The index in the value array we wish to have updated, or
2523 * num_counters if we want all stats updated
2524 * Return codes -
2525 * < 0 - Error, no counters updated
2526 * index - Updated the single counter pointed to by index
2527 * num_counters - Updated all counters (will reset the timestamp
2528 * and prevent further calls for lifespan milliseconds)
2529 * Drivers are allowed to update all counters in leiu of just the
2530 * one given in index at their option
2531 */
2532 int (*get_hw_stats)(struct ib_device *device,
2533 struct rdma_hw_stats *stats, u8 port, int index);
ea4baf7f
PP
2534 /*
2535 * This function is called once for each port when a ib device is
2536 * registered.
2537 */
2538 int (*init_port)(struct ib_device *device, u8 port_num,
2539 struct kobject *port_sysfs);
02da3750
LR
2540 /**
2541 * Allows rdma drivers to add their own restrack attributes.
2542 */
f4434529 2543 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
65959522 2544 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
9e2a187a 2545 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
65959522 2546 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
5cc34116 2547 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
65959522 2548 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
211cd945 2549 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
21a428a0 2550
d0899892 2551 /* Device lifecycle callbacks */
ca22354b
JG
2552 /*
2553 * Called after the device becomes registered, before clients are
2554 * attached
2555 */
2556 int (*enable_driver)(struct ib_device *dev);
d0899892
JG
2557 /*
2558 * This is called as part of ib_dealloc_device().
2559 */
2560 void (*dealloc_driver)(struct ib_device *dev);
2561
dd05cb82
KH
2562 /* iWarp CM callbacks */
2563 void (*iw_add_ref)(struct ib_qp *qp);
2564 void (*iw_rem_ref)(struct ib_qp *qp);
2565 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2566 int (*iw_connect)(struct iw_cm_id *cm_id,
2567 struct iw_cm_conn_param *conn_param);
2568 int (*iw_accept)(struct iw_cm_id *cm_id,
2569 struct iw_cm_conn_param *conn_param);
2570 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2571 u8 pdata_len);
2572 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2573 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
99fa331d
MZ
2574 /**
2575 * counter_bind_qp - Bind a QP to a counter.
2576 * @counter - The counter to be bound. If counter->id is zero then
2577 * the driver needs to allocate a new counter and set counter->id
2578 */
2579 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2580 /**
2581 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2582 * counter and bind it onto the default one
2583 */
2584 int (*counter_unbind_qp)(struct ib_qp *qp);
2585 /**
2586 * counter_dealloc -De-allocate the hw counter
2587 */
2588 int (*counter_dealloc)(struct rdma_counter *counter);
c4ffee7c
MZ
2589 /**
2590 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2591 * the driver initialized data.
2592 */
2593 struct rdma_hw_stats *(*counter_alloc_stats)(
2594 struct rdma_counter *counter);
2595 /**
2596 * counter_update_stats - Query the stats value of this counter
2597 */
2598 int (*counter_update_stats)(struct rdma_counter *counter);
dd05cb82 2599
4061ff7a
EA
2600 /**
2601 * Allows rdma drivers to add their own restrack attributes
2602 * dumped via 'rdma stat' iproute2 command.
2603 */
f4434529 2604 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
4061ff7a 2605
1c8fb1ea
YH
2606 /* query driver for its ucontext properties */
2607 int (*query_ucontext)(struct ib_ucontext *context,
2608 struct uverbs_attr_bundle *attrs);
2609
d3456914 2610 DECLARE_RDMA_OBJ_SIZE(ib_ah);
3b023e1b 2611 DECLARE_RDMA_OBJ_SIZE(ib_counters);
e39afe3d 2612 DECLARE_RDMA_OBJ_SIZE(ib_cq);
21a428a0 2613 DECLARE_RDMA_OBJ_SIZE(ib_pd);
68e326de 2614 DECLARE_RDMA_OBJ_SIZE(ib_srq);
a2a074ef 2615 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
28ad5f65 2616 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
521ed0d9
KH
2617};
2618
cebe556b
PP
2619struct ib_core_device {
2620 /* device must be the first element in structure until,
2621 * union of ib_core_device and device exists in ib_device.
2622 */
2623 struct device dev;
4e0f7b90 2624 possible_net_t rdma_net;
cebe556b
PP
2625 struct kobject *ports_kobj;
2626 struct list_head port_list;
2627 struct ib_device *owner; /* reach back to owner ib_device */
2628};
41eda65c 2629
cebe556b 2630struct rdma_restrack_root;
1da177e4 2631struct ib_device {
0957c29f
BVA
2632 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2633 struct device *dma_device;
3023a1e9 2634 struct ib_device_ops ops;
1da177e4 2635 char name[IB_DEVICE_NAME_MAX];
324e227e 2636 struct rcu_head rcu_head;
1da177e4
LT
2637
2638 struct list_head event_handler_list;
6b57cea9
PP
2639 /* Protects event_handler_list */
2640 struct rw_semaphore event_handler_rwsem;
2641
2642 /* Protects QP's event_handler calls and open_qp list */
40adf686 2643 spinlock_t qp_open_list_lock;
1da177e4 2644
921eab11 2645 struct rw_semaphore client_data_rwsem;
0df91bb6 2646 struct xarray client_data;
d0899892 2647 struct mutex unregistration_lock;
1da177e4 2648
17e10646
PP
2649 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2650 rwlock_t cache_lock;
7738613e 2651 /**
8ceb1357 2652 * port_data is indexed by port number
7738613e 2653 */
8ceb1357 2654 struct ib_port_data *port_data;
1da177e4 2655
f4fd0b22
MT
2656 int num_comp_vectors;
2657
cebe556b
PP
2658 union {
2659 struct device dev;
2660 struct ib_core_device coredev;
2661 };
2662
d4122f5a
PP
2663 /* First group for device attributes,
2664 * Second group for driver provided attributes (optional).
2665 * It is NULL terminated array.
2666 */
2667 const struct attribute_group *groups[3];
adee9f3f 2668
17a55f79 2669 u64 uverbs_cmd_mask;
f21519b2 2670 u64 uverbs_ex_cmd_mask;
274c0891 2671
bd99fdea 2672 char node_desc[IB_DEVICE_NODE_DESC_MAX];
cf311cd4 2673 __be64 node_guid;
96f15c03 2674 u32 local_dma_lkey;
4139032b 2675 u16 is_switch:1;
6780c4fa
GP
2676 /* Indicates kernel verbs support, should not be used in drivers */
2677 u16 kverbs_provider:1;
da662979
YF
2678 /* CQ adaptive moderation (RDMA DIM) */
2679 u16 use_cq_dim:1;
1da177e4
LT
2680 u8 node_type;
2681 u8 phys_port_cnt;
3e153a93 2682 struct ib_device_attr attrs;
b40f4757
CL
2683 struct attribute_group *hw_stats_ag;
2684 struct rdma_hw_stats *hw_stats;
7738613e 2685
43579b5f
PP
2686#ifdef CONFIG_CGROUP_RDMA
2687 struct rdmacg_device cg_device;
2688#endif
2689
ecc82c53 2690 u32 index;
c7ff819a
YF
2691
2692 spinlock_t cq_pools_lock;
2693 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2694
41eda65c 2695 struct rdma_restrack_root *res;
ecc82c53 2696
0cbf432d 2697 const struct uapi_definition *driver_def;
d79af724 2698
01b67117 2699 /*
d79af724
JG
2700 * Positive refcount indicates that the device is currently
2701 * registered and cannot be unregistered.
01b67117
PP
2702 */
2703 refcount_t refcount;
2704 struct completion unreg_completion;
d0899892 2705 struct work_struct unregistration_work;
3856ec4b
SW
2706
2707 const struct rdma_link_ops *link_ops;
4e0f7b90
PP
2708
2709 /* Protects compat_devs xarray modifications */
2710 struct mutex compat_devs_mutex;
2711 /* Maintains compat devices for each net namespace */
2712 struct xarray compat_devs;
dd05cb82
KH
2713
2714 /* Used by iWarp CM */
2715 char iw_ifname[IFNAMSIZ];
2716 u32 iw_driver_flags;
bd3920ea 2717 u32 lag_flags;
1da177e4
LT
2718};
2719
0e2d00eb 2720struct ib_client_nl_info;
1da177e4 2721struct ib_client {
e59178d8 2722 const char *name;
11a0ae4c 2723 int (*add)(struct ib_device *ibdev);
7c1eb45a 2724 void (*remove)(struct ib_device *, void *client_data);
dc1435c0 2725 void (*rename)(struct ib_device *dev, void *client_data);
0e2d00eb
JG
2726 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2727 struct ib_client_nl_info *res);
2728 int (*get_global_nl_info)(struct ib_client_nl_info *res);
1da177e4 2729
9268f72d
YK
2730 /* Returns the net_dev belonging to this ib_client and matching the
2731 * given parameters.
2732 * @dev: An RDMA device that the net_dev use for communication.
2733 * @port: A physical port number on the RDMA device.
2734 * @pkey: P_Key that the net_dev uses if applicable.
2735 * @gid: A GID that the net_dev uses to communicate.
2736 * @addr: An IP address the net_dev is configured with.
2737 * @client_data: The device's client data set by ib_set_client_data().
2738 *
2739 * An ib_client that implements a net_dev on top of RDMA devices
2740 * (such as IP over IB) should implement this callback, allowing the
2741 * rdma_cm module to find the right net_dev for a given request.
2742 *
2743 * The caller is responsible for calling dev_put on the returned
2744 * netdev. */
2745 struct net_device *(*get_net_dev_by_params)(
2746 struct ib_device *dev,
2747 u8 port,
2748 u16 pkey,
2749 const union ib_gid *gid,
2750 const struct sockaddr *addr,
2751 void *client_data);
621e55ff
JG
2752
2753 refcount_t uses;
2754 struct completion uses_zero;
e59178d8 2755 u32 client_id;
6780c4fa
GP
2756
2757 /* kverbs are not required by the client */
2758 u8 no_kverbs_req:1;
1da177e4
LT
2759};
2760
a808273a
SS
2761/*
2762 * IB block DMA iterator
2763 *
2764 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2765 * to a HW supported page size.
2766 */
2767struct ib_block_iter {
2768 /* internal states */
2769 struct scatterlist *__sg; /* sg holding the current aligned block */
2770 dma_addr_t __dma_addr; /* unaligned DMA address of this block */
2771 unsigned int __sg_nents; /* number of SG entries */
2772 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
2773 unsigned int __pg_bit; /* alignment of current block */
2774};
2775
459cc69f
LR
2776struct ib_device *_ib_alloc_device(size_t size);
2777#define ib_alloc_device(drv_struct, member) \
2778 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2779 BUILD_BUG_ON_ZERO(offsetof( \
2780 struct drv_struct, member))), \
2781 struct drv_struct, member)
2782
1da177e4
LT
2783void ib_dealloc_device(struct ib_device *device);
2784
9abb0d1b 2785void ib_get_device_fw_str(struct ib_device *device, char *str);
5fa76c20 2786
ea4baf7f 2787int ib_register_device(struct ib_device *device, const char *name);
1da177e4 2788void ib_unregister_device(struct ib_device *device);
d0899892
JG
2789void ib_unregister_driver(enum rdma_driver_id driver_id);
2790void ib_unregister_device_and_put(struct ib_device *device);
2791void ib_unregister_device_queued(struct ib_device *ib_dev);
1da177e4
LT
2792
2793int ib_register_client (struct ib_client *client);
2794void ib_unregister_client(struct ib_client *client);
2795
a808273a
SS
2796void __rdma_block_iter_start(struct ib_block_iter *biter,
2797 struct scatterlist *sglist,
2798 unsigned int nents,
2799 unsigned long pgsz);
2800bool __rdma_block_iter_next(struct ib_block_iter *biter);
2801
2802/**
2803 * rdma_block_iter_dma_address - get the aligned dma address of the current
2804 * block held by the block iterator.
2805 * @biter: block iterator holding the memory block
2806 */
2807static inline dma_addr_t
2808rdma_block_iter_dma_address(struct ib_block_iter *biter)
2809{
2810 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2811}
2812
2813/**
2814 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2815 * @sglist: sglist to iterate over
2816 * @biter: block iterator holding the memory block
2817 * @nents: maximum number of sg entries to iterate over
2818 * @pgsz: best HW supported page size to use
2819 *
2820 * Callers may use rdma_block_iter_dma_address() to get each
2821 * blocks aligned DMA address.
2822 */
2823#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2824 for (__rdma_block_iter_start(biter, sglist, nents, \
2825 pgsz); \
2826 __rdma_block_iter_next(biter);)
2827
0df91bb6
JG
2828/**
2829 * ib_get_client_data - Get IB client context
2830 * @device:Device to get context for
2831 * @client:Client to get context for
2832 *
2833 * ib_get_client_data() returns the client context data set with
2834 * ib_set_client_data(). This can only be called while the client is
2835 * registered to the device, once the ib_client remove() callback returns this
2836 * cannot be called.
2837 */
2838static inline void *ib_get_client_data(struct ib_device *device,
2839 struct ib_client *client)
2840{
2841 return xa_load(&device->client_data, client->client_id);
2842}
1da177e4
LT
2843void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2844 void *data);
521ed0d9
KH
2845void ib_set_device_ops(struct ib_device *device,
2846 const struct ib_device_ops *ops);
1da177e4 2847
5f9794dc 2848int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
c043ff2c
MK
2849 unsigned long pfn, unsigned long size, pgprot_t prot,
2850 struct rdma_user_mmap_entry *entry);
3411f9f0
MK
2851int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2852 struct rdma_user_mmap_entry *entry,
2853 size_t length);
7a763d18
YH
2854int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2855 struct rdma_user_mmap_entry *entry,
2856 size_t length, u32 min_pgoff,
2857 u32 max_pgoff);
2858
3411f9f0
MK
2859struct rdma_user_mmap_entry *
2860rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2861 unsigned long pgoff);
2862struct rdma_user_mmap_entry *
2863rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2864 struct vm_area_struct *vma);
2865void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2866
2867void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
5f9794dc 2868
e2773c06
RD
2869static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2870{
2871 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2872}
2873
2874static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2875{
43c61165 2876 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
e2773c06
RD
2877}
2878
c66db311
MB
2879static inline bool ib_is_buffer_cleared(const void __user *p,
2880 size_t len)
301a721e 2881{
92d27ae6 2882 bool ret;
301a721e
MB
2883 u8 *buf;
2884
2885 if (len > USHRT_MAX)
2886 return false;
2887
92d27ae6
ME
2888 buf = memdup_user(p, len);
2889 if (IS_ERR(buf))
301a721e
MB
2890 return false;
2891
301a721e 2892 ret = !memchr_inv(buf, 0, len);
301a721e
MB
2893 kfree(buf);
2894 return ret;
2895}
2896
c66db311
MB
2897static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2898 size_t offset,
2899 size_t len)
2900{
2901 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2902}
2903
1c77483e
YH
2904/**
2905 * ib_is_destroy_retryable - Check whether the uobject destruction
2906 * is retryable.
2907 * @ret: The initial destruction return code
2908 * @why: remove reason
2909 * @uobj: The uobject that is destroyed
2910 *
2911 * This function is a helper function that IB layer and low-level drivers
2912 * can use to consider whether the destruction of the given uobject is
2913 * retry-able.
2914 * It checks the original return code, if it wasn't success the destruction
2915 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2916 * the remove reason. (i.e. why).
2917 * Must be called with the object locked for destroy.
2918 */
2919static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2920 struct ib_uobject *uobj)
2921{
2922 return ret && (why == RDMA_REMOVE_DESTROY ||
2923 uobj->context->cleanup_retryable);
2924}
2925
2926/**
2927 * ib_destroy_usecnt - Called during destruction to check the usecnt
2928 * @usecnt: The usecnt atomic
2929 * @why: remove reason
2930 * @uobj: The uobject that is destroyed
2931 *
2932 * Non-zero usecnts will block destruction unless destruction was triggered by
2933 * a ucontext cleanup.
2934 */
2935static inline int ib_destroy_usecnt(atomic_t *usecnt,
2936 enum rdma_remove_reason why,
2937 struct ib_uobject *uobj)
2938{
2939 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2940 return -EBUSY;
2941 return 0;
2942}
2943
8a51866f
RD
2944/**
2945 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2946 * contains all required attributes and no attributes not allowed for
2947 * the given QP state transition.
2948 * @cur_state: Current QP state
2949 * @next_state: Next QP state
2950 * @type: QP type
2951 * @mask: Mask of supplied QP attributes
2952 *
2953 * This function is a helper function that a low-level driver's
2954 * modify_qp method can use to validate the consumer's input. It
2955 * checks that cur_state and next_state are valid QP states, that a
2956 * transition from cur_state to next_state is allowed by the IB spec,
2957 * and that the attribute mask supplied is allowed for the transition.
2958 */
19b1f540 2959bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
d31131bb 2960 enum ib_qp_type type, enum ib_qp_attr_mask mask);
8a51866f 2961
dcc9881e
LR
2962void ib_register_event_handler(struct ib_event_handler *event_handler);
2963void ib_unregister_event_handler(struct ib_event_handler *event_handler);
6b57cea9 2964void ib_dispatch_event(const struct ib_event *event);
1da177e4 2965
1da177e4
LT
2966int ib_query_port(struct ib_device *device,
2967 u8 port_num, struct ib_port_attr *port_attr);
2968
a3f5adaf
EC
2969enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2970 u8 port_num);
2971
4139032b
HR
2972/**
2973 * rdma_cap_ib_switch - Check if the device is IB switch
2974 * @device: Device to check
2975 *
2976 * Device driver is responsible for setting is_switch bit on
2977 * in ib_device structure at init time.
2978 *
2979 * Return: true if the device is IB switch.
2980 */
2981static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2982{
2983 return device->is_switch;
2984}
2985
0cf18d77
IW
2986/**
2987 * rdma_start_port - Return the first valid port number for the device
2988 * specified
2989 *
2990 * @device: Device to be checked
2991 *
2992 * Return start port number
2993 */
2994static inline u8 rdma_start_port(const struct ib_device *device)
2995{
4139032b 2996 return rdma_cap_ib_switch(device) ? 0 : 1;
0cf18d77
IW
2997}
2998
ea1075ed
JG
2999/**
3000 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3001 * @device - The struct ib_device * to iterate over
3002 * @iter - The unsigned int to store the port number
3003 */
3004#define rdma_for_each_port(device, iter) \
3005 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
3006 unsigned int, iter))); \
3007 iter <= rdma_end_port(device); (iter)++)
3008
0cf18d77
IW
3009/**
3010 * rdma_end_port - Return the last valid port number for the device
3011 * specified
3012 *
3013 * @device: Device to be checked
3014 *
3015 * Return last port number
3016 */
3017static inline u8 rdma_end_port(const struct ib_device *device)
3018{
4139032b 3019 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
0cf18d77
IW
3020}
3021
24dc831b
YS
3022static inline int rdma_is_port_valid(const struct ib_device *device,
3023 unsigned int port)
3024{
3025 return (port >= rdma_start_port(device) &&
3026 port <= rdma_end_port(device));
3027}
3028
b02289b3
AK
3029static inline bool rdma_is_grh_required(const struct ib_device *device,
3030 u8 port_num)
3031{
8ceb1357
JG
3032 return device->port_data[port_num].immutable.core_cap_flags &
3033 RDMA_CORE_PORT_IB_GRH_REQUIRED;
b02289b3
AK
3034}
3035
5ede9289 3036static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
de66be94 3037{
8ceb1357
JG
3038 return device->port_data[port_num].immutable.core_cap_flags &
3039 RDMA_CORE_CAP_PROT_IB;
de66be94
MW
3040}
3041
5ede9289 3042static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
7766a99f 3043{
8ceb1357
JG
3044 return device->port_data[port_num].immutable.core_cap_flags &
3045 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
7766a99f
MB
3046}
3047
3048static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3049{
8ceb1357
JG
3050 return device->port_data[port_num].immutable.core_cap_flags &
3051 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
7766a99f
MB
3052}
3053
3054static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
de66be94 3055{
8ceb1357
JG
3056 return device->port_data[port_num].immutable.core_cap_flags &
3057 RDMA_CORE_CAP_PROT_ROCE;
de66be94
MW
3058}
3059
5ede9289 3060static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
de66be94 3061{
8ceb1357
JG
3062 return device->port_data[port_num].immutable.core_cap_flags &
3063 RDMA_CORE_CAP_PROT_IWARP;
de66be94
MW
3064}
3065
5ede9289 3066static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
de66be94 3067{
7766a99f
MB
3068 return rdma_protocol_ib(device, port_num) ||
3069 rdma_protocol_roce(device, port_num);
de66be94
MW
3070}
3071
aa773bd4
OG
3072static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3073{
8ceb1357
JG
3074 return device->port_data[port_num].immutable.core_cap_flags &
3075 RDMA_CORE_CAP_PROT_RAW_PACKET;
aa773bd4
OG
3076}
3077
ce1e055f
OG
3078static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3079{
8ceb1357
JG
3080 return device->port_data[port_num].immutable.core_cap_flags &
3081 RDMA_CORE_CAP_PROT_USNIC;
ce1e055f
OG
3082}
3083
c757dea8 3084/**
296ec009 3085 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
c757dea8 3086 * Management Datagrams.
296ec009
MW
3087 * @device: Device to check
3088 * @port_num: Port number to check
c757dea8 3089 *
296ec009
MW
3090 * Management Datagrams (MAD) are a required part of the InfiniBand
3091 * specification and are supported on all InfiniBand devices. A slightly
3092 * extended version are also supported on OPA interfaces.
c757dea8 3093 *
296ec009 3094 * Return: true if the port supports sending/receiving of MAD packets.
c757dea8 3095 */
5ede9289 3096static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
c757dea8 3097{
8ceb1357
JG
3098 return device->port_data[port_num].immutable.core_cap_flags &
3099 RDMA_CORE_CAP_IB_MAD;
c757dea8
MW
3100}
3101
65995fee
IW
3102/**
3103 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3104 * Management Datagrams.
3105 * @device: Device to check
3106 * @port_num: Port number to check
3107 *
3108 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3109 * datagrams with their own versions. These OPA MADs share many but not all of
3110 * the characteristics of InfiniBand MADs.
3111 *
3112 * OPA MADs differ in the following ways:
3113 *
3114 * 1) MADs are variable size up to 2K
3115 * IBTA defined MADs remain fixed at 256 bytes
3116 * 2) OPA SMPs must carry valid PKeys
3117 * 3) OPA SMP packets are a different format
3118 *
3119 * Return: true if the port supports OPA MAD packet formats.
3120 */
3121static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3122{
d3243da8
LR
3123 return device->port_data[port_num].immutable.core_cap_flags &
3124 RDMA_CORE_CAP_OPA_MAD;
65995fee
IW
3125}
3126
29541e3a 3127/**
296ec009
MW
3128 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3129 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3130 * @device: Device to check
3131 * @port_num: Port number to check
29541e3a 3132 *
296ec009
MW
3133 * Each InfiniBand node is required to provide a Subnet Management Agent
3134 * that the subnet manager can access. Prior to the fabric being fully
3135 * configured by the subnet manager, the SMA is accessed via a well known
3136 * interface called the Subnet Management Interface (SMI). This interface
3137 * uses directed route packets to communicate with the SM to get around the
3138 * chicken and egg problem of the SM needing to know what's on the fabric
3139 * in order to configure the fabric, and needing to configure the fabric in
3140 * order to send packets to the devices on the fabric. These directed
3141 * route packets do not need the fabric fully configured in order to reach
3142 * their destination. The SMI is the only method allowed to send
3143 * directed route packets on an InfiniBand fabric.
29541e3a 3144 *
296ec009 3145 * Return: true if the port provides an SMI.
29541e3a 3146 */
5ede9289 3147static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
29541e3a 3148{
8ceb1357
JG
3149 return device->port_data[port_num].immutable.core_cap_flags &
3150 RDMA_CORE_CAP_IB_SMI;
29541e3a
MW
3151}
3152
72219cea
MW
3153/**
3154 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3155 * Communication Manager.
296ec009
MW
3156 * @device: Device to check
3157 * @port_num: Port number to check
72219cea 3158 *
296ec009
MW
3159 * The InfiniBand Communication Manager is one of many pre-defined General
3160 * Service Agents (GSA) that are accessed via the General Service
3161 * Interface (GSI). It's role is to facilitate establishment of connections
3162 * between nodes as well as other management related tasks for established
3163 * connections.
72219cea 3164 *
296ec009
MW
3165 * Return: true if the port supports an IB CM (this does not guarantee that
3166 * a CM is actually running however).
72219cea 3167 */
5ede9289 3168static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
72219cea 3169{
8ceb1357
JG
3170 return device->port_data[port_num].immutable.core_cap_flags &
3171 RDMA_CORE_CAP_IB_CM;
72219cea
MW
3172}
3173
04215330
MW
3174/**
3175 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3176 * Communication Manager.
296ec009
MW
3177 * @device: Device to check
3178 * @port_num: Port number to check
04215330 3179 *
296ec009
MW
3180 * Similar to above, but specific to iWARP connections which have a different
3181 * managment protocol than InfiniBand.
04215330 3182 *
296ec009
MW
3183 * Return: true if the port supports an iWARP CM (this does not guarantee that
3184 * a CM is actually running however).
04215330 3185 */
5ede9289 3186static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
04215330 3187{
8ceb1357
JG
3188 return device->port_data[port_num].immutable.core_cap_flags &
3189 RDMA_CORE_CAP_IW_CM;
04215330
MW
3190}
3191
fe53ba2f
MW
3192/**
3193 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3194 * Subnet Administration.
296ec009
MW
3195 * @device: Device to check
3196 * @port_num: Port number to check
fe53ba2f 3197 *
296ec009
MW
3198 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3199 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3200 * fabrics, devices should resolve routes to other hosts by contacting the
3201 * SA to query the proper route.
fe53ba2f 3202 *
296ec009
MW
3203 * Return: true if the port should act as a client to the fabric Subnet
3204 * Administration interface. This does not imply that the SA service is
3205 * running locally.
fe53ba2f 3206 */
5ede9289 3207static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
fe53ba2f 3208{
8ceb1357
JG
3209 return device->port_data[port_num].immutable.core_cap_flags &
3210 RDMA_CORE_CAP_IB_SA;
fe53ba2f
MW
3211}
3212
a31ad3b0
MW
3213/**
3214 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3215 * Multicast.
296ec009
MW
3216 * @device: Device to check
3217 * @port_num: Port number to check
a31ad3b0 3218 *
296ec009
MW
3219 * InfiniBand multicast registration is more complex than normal IPv4 or
3220 * IPv6 multicast registration. Each Host Channel Adapter must register
3221 * with the Subnet Manager when it wishes to join a multicast group. It
3222 * should do so only once regardless of how many queue pairs it subscribes
3223 * to this group. And it should leave the group only after all queue pairs
3224 * attached to the group have been detached.
a31ad3b0 3225 *
296ec009
MW
3226 * Return: true if the port must undertake the additional adminstrative
3227 * overhead of registering/unregistering with the SM and tracking of the
3228 * total number of queue pairs attached to the multicast group.
a31ad3b0 3229 */
5ede9289 3230static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
a31ad3b0
MW
3231{
3232 return rdma_cap_ib_sa(device, port_num);
3233}
3234
30a74ef4
MW
3235/**
3236 * rdma_cap_af_ib - Check if the port of device has the capability
3237 * Native Infiniband Address.
296ec009
MW
3238 * @device: Device to check
3239 * @port_num: Port number to check
30a74ef4 3240 *
296ec009
MW
3241 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3242 * GID. RoCE uses a different mechanism, but still generates a GID via
3243 * a prescribed mechanism and port specific data.
30a74ef4 3244 *
296ec009
MW
3245 * Return: true if the port uses a GID address to identify devices on the
3246 * network.
30a74ef4 3247 */
5ede9289 3248static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
30a74ef4 3249{
8ceb1357
JG
3250 return device->port_data[port_num].immutable.core_cap_flags &
3251 RDMA_CORE_CAP_AF_IB;
30a74ef4
MW
3252}
3253
227128fc
MW
3254/**
3255 * rdma_cap_eth_ah - Check if the port of device has the capability
296ec009
MW
3256 * Ethernet Address Handle.
3257 * @device: Device to check
3258 * @port_num: Port number to check
227128fc 3259 *
296ec009
MW
3260 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3261 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3262 * port. Normally, packet headers are generated by the sending host
3263 * adapter, but when sending connectionless datagrams, we must manually
3264 * inject the proper headers for the fabric we are communicating over.
227128fc 3265 *
296ec009
MW
3266 * Return: true if we are running as a RoCE port and must force the
3267 * addition of a Global Route Header built from our Ethernet Address
3268 * Handle into our header list for connectionless packets.
227128fc 3269 */
5ede9289 3270static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
227128fc 3271{
8ceb1357
JG
3272 return device->port_data[port_num].immutable.core_cap_flags &
3273 RDMA_CORE_CAP_ETH_AH;
227128fc
MW
3274}
3275
94d595c5
DC
3276/**
3277 * rdma_cap_opa_ah - Check if the port of device supports
3278 * OPA Address handles
3279 * @device: Device to check
3280 * @port_num: Port number to check
3281 *
3282 * Return: true if we are running on an OPA device which supports
3283 * the extended OPA addressing.
3284 */
3285static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3286{
8ceb1357 3287 return (device->port_data[port_num].immutable.core_cap_flags &
94d595c5
DC
3288 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3289}
3290
337877a4
IW
3291/**
3292 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3293 *
3294 * @device: Device
3295 * @port_num: Port number
3296 *
3297 * This MAD size includes the MAD headers and MAD payload. No other headers
3298 * are included.
3299 *
3300 * Return the max MAD size required by the Port. Will return 0 if the port
3301 * does not support MADs
3302 */
3303static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3304{
8ceb1357 3305 return device->port_data[port_num].immutable.max_mad_size;
337877a4
IW
3306}
3307
03db3a2d
MB
3308/**
3309 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3310 * @device: Device to check
3311 * @port_num: Port number to check
3312 *
3313 * RoCE GID table mechanism manages the various GIDs for a device.
3314 *
3315 * NOTE: if allocating the port's GID table has failed, this call will still
3316 * return true, but any RoCE GID table API will fail.
3317 *
3318 * Return: true if the port uses RoCE GID table mechanism in order to manage
3319 * its GIDs.
3320 */
3321static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3322 u8 port_num)
3323{
3324 return rdma_protocol_roce(device, port_num) &&
3023a1e9 3325 device->ops.add_gid && device->ops.del_gid;
03db3a2d
MB
3326}
3327
002516ed
CH
3328/*
3329 * Check if the device supports READ W/ INVALIDATE.
3330 */
3331static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3332{
3333 /*
3334 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3335 * has support for it yet.
3336 */
3337 return rdma_protocol_iwarp(dev, port_num);
3338}
3339
6d72344c
KW
3340/**
3341 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3342 * @device: Device
3343 * @port_num: 1 based Port number
3344 *
3345 * Return true if port is an Intel OPA port , false if not
3346 */
3347static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3348 u32 port_num)
3349{
3350 return (device->port_data[port_num].immutable.core_cap_flags &
3351 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3352}
3353
3354/**
3355 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3356 * @device: Device
3357 * @port_num: Port number
3358 * @mtu: enum value of MTU
3359 *
3360 * Return the MTU size supported by the port as an integer value. Will return
3361 * -1 if enum value of mtu is not supported.
3362 */
3363static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3364 int mtu)
3365{
3366 if (rdma_core_cap_opa_port(device, port))
3367 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3368 else
3369 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3370}
3371
3372/**
3373 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3374 * @device: Device
3375 * @port_num: Port number
3376 * @attr: port attribute
3377 *
3378 * Return the MTU size supported by the port as an integer value.
3379 */
3380static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3381 struct ib_port_attr *attr)
3382{
3383 if (rdma_core_cap_opa_port(device, port))
3384 return attr->phys_mtu;
3385 else
3386 return ib_mtu_enum_to_int(attr->max_mtu);
3387}
3388
50174a7f
EC
3389int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3390 int state);
3391int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3392 struct ifla_vf_info *info);
3393int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3394 struct ifla_vf_stats *stats);
bfcb3c5d
DG
3395int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3396 struct ifla_vf_guid *node_guid,
3397 struct ifla_vf_guid *port_guid);
50174a7f
EC
3398int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3399 int type);
3400
1da177e4
LT
3401int ib_query_pkey(struct ib_device *device,
3402 u8 port_num, u16 index, u16 *pkey);
3403
3404int ib_modify_device(struct ib_device *device,
3405 int device_modify_mask,
3406 struct ib_device_modify *device_modify);
3407
3408int ib_modify_port(struct ib_device *device,
3409 u8 port_num, int port_modify_mask,
3410 struct ib_port_modify *port_modify);
3411
5eb620c8 3412int ib_find_gid(struct ib_device *device, union ib_gid *gid,
b26c4a11 3413 u8 *port_num, u16 *index);
5eb620c8
YE
3414
3415int ib_find_pkey(struct ib_device *device,
3416 u8 port_num, u16 pkey, u16 *index);
3417
ed082d36
CH
3418enum ib_pd_flags {
3419 /*
3420 * Create a memory registration for all memory in the system and place
3421 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3422 * ULPs to avoid the overhead of dynamic MRs.
3423 *
3424 * This flag is generally considered unsafe and must only be used in
3425 * extremly trusted environments. Every use of it will log a warning
3426 * in the kernel log.
3427 */
3428 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3429};
1da177e4 3430
ed082d36
CH
3431struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3432 const char *caller);
c4367a26 3433
ed082d36 3434#define ib_alloc_pd(device, flags) \
e4496447 3435 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
c4367a26 3436
91a7c58f 3437int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
c4367a26
SR
3438
3439/**
3440 * ib_dealloc_pd - Deallocate kernel PD
3441 * @pd: The protection domain
3442 *
3443 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3444 */
3445static inline void ib_dealloc_pd(struct ib_pd *pd)
3446{
91a7c58f
LR
3447 int ret = ib_dealloc_pd_user(pd, NULL);
3448
3449 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
c4367a26 3450}
1da177e4 3451
b090c4e3
GP
3452enum rdma_create_ah_flags {
3453 /* In a sleepable context */
3454 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3455};
3456
1da177e4 3457/**
0a18cfe4 3458 * rdma_create_ah - Creates an address handle for the given address vector.
1da177e4
LT
3459 * @pd: The protection domain associated with the address handle.
3460 * @ah_attr: The attributes of the address vector.
b090c4e3 3461 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
1da177e4
LT
3462 *
3463 * The address handle is used to reference a local or global destination
3464 * in all UD QP post sends.
3465 */
b090c4e3
GP
3466struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3467 u32 flags);
1da177e4 3468
5cda6587
PP
3469/**
3470 * rdma_create_user_ah - Creates an address handle for the given address vector.
3471 * It resolves destination mac address for ah attribute of RoCE type.
3472 * @pd: The protection domain associated with the address handle.
3473 * @ah_attr: The attributes of the address vector.
3474 * @udata: pointer to user's input output buffer information need by
3475 * provider driver.
3476 *
3477 * It returns 0 on success and returns appropriate error code on error.
3478 * The address handle is used to reference a local or global destination
3479 * in all UD QP post sends.
3480 */
3481struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3482 struct rdma_ah_attr *ah_attr,
3483 struct ib_udata *udata);
850d8fd7
MS
3484/**
3485 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3486 * work completion.
3487 * @hdr: the L3 header to parse
3488 * @net_type: type of header to parse
3489 * @sgid: place to store source gid
3490 * @dgid: place to store destination gid
3491 */
3492int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3493 enum rdma_network_type net_type,
3494 union ib_gid *sgid, union ib_gid *dgid);
3495
3496/**
3497 * ib_get_rdma_header_version - Get the header version
3498 * @hdr: the L3 header to parse
3499 */
3500int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3501
4e00d694 3502/**
f6bdb142 3503 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
4e00d694
SH
3504 * work completion.
3505 * @device: Device on which the received message arrived.
3506 * @port_num: Port on which the received message arrived.
3507 * @wc: Work completion associated with the received message.
3508 * @grh: References the received global route header. This parameter is
3509 * ignored unless the work completion indicates that the GRH is valid.
3510 * @ah_attr: Returned attributes that can be used when creating an address
3511 * handle for replying to the message.
b7403217
PP
3512 * When ib_init_ah_attr_from_wc() returns success,
3513 * (a) for IB link layer it optionally contains a reference to SGID attribute
3514 * when GRH is present for IB link layer.
3515 * (b) for RoCE link layer it contains a reference to SGID attribute.
3516 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3517 * attributes which are initialized using ib_init_ah_attr_from_wc().
3518 *
4e00d694 3519 */
f6bdb142
PP
3520int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3521 const struct ib_wc *wc, const struct ib_grh *grh,
3522 struct rdma_ah_attr *ah_attr);
4e00d694 3523
513789ed
HR
3524/**
3525 * ib_create_ah_from_wc - Creates an address handle associated with the
3526 * sender of the specified work completion.
3527 * @pd: The protection domain associated with the address handle.
3528 * @wc: Work completion information associated with a received message.
3529 * @grh: References the received global route header. This parameter is
3530 * ignored unless the work completion indicates that the GRH is valid.
3531 * @port_num: The outbound port number to associate with the address.
3532 *
3533 * The address handle is used to reference a local or global destination
3534 * in all UD QP post sends.
3535 */
73cdaaee
IW
3536struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3537 const struct ib_grh *grh, u8 port_num);
513789ed 3538
1da177e4 3539/**
67b985b6 3540 * rdma_modify_ah - Modifies the address vector associated with an address
1da177e4
LT
3541 * handle.
3542 * @ah: The address handle to modify.
3543 * @ah_attr: The new address vector attributes to associate with the
3544 * address handle.
3545 */
67b985b6 3546int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
1da177e4
LT
3547
3548/**
bfbfd661 3549 * rdma_query_ah - Queries the address vector associated with an address
1da177e4
LT
3550 * handle.
3551 * @ah: The address handle to query.
3552 * @ah_attr: The address vector attributes associated with the address
3553 * handle.
3554 */
bfbfd661 3555int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
1da177e4 3556
2553ba21
GP
3557enum rdma_destroy_ah_flags {
3558 /* In a sleepable context */
3559 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3560};
3561
1da177e4 3562/**
c4367a26 3563 * rdma_destroy_ah_user - Destroys an address handle.
1da177e4 3564 * @ah: The address handle to destroy.
2553ba21 3565 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
c4367a26 3566 * @udata: Valid user data or NULL for kernel objects
1da177e4 3567 */
c4367a26
SR
3568int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3569
3570/**
3571 * rdma_destroy_ah - Destroys an kernel address handle.
3572 * @ah: The address handle to destroy.
3573 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3574 *
3575 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3576 */
9a9ebf8c 3577static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
c4367a26 3578{
9a9ebf8c
LR
3579 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3580
3581 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
c4367a26 3582}
1da177e4 3583
b0810b03
JG
3584struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3585 struct ib_srq_init_attr *srq_init_attr,
3586 struct ib_usrq_object *uobject,
3587 struct ib_udata *udata);
3588static inline struct ib_srq *
3589ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3590{
3591 if (!pd->device->ops.create_srq)
3592 return ERR_PTR(-EOPNOTSUPP);
3593
3594 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3595}
d41fcc67
RD
3596
3597/**
3598 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3599 * @srq: The SRQ to modify.
3600 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3601 * the current values of selected SRQ attributes are returned.
3602 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3603 * are being modified.
3604 *
3605 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3606 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3607 * the number of receives queued drops below the limit.
3608 */
3609int ib_modify_srq(struct ib_srq *srq,
3610 struct ib_srq_attr *srq_attr,
3611 enum ib_srq_attr_mask srq_attr_mask);
3612
3613/**
3614 * ib_query_srq - Returns the attribute list and current values for the
3615 * specified SRQ.
3616 * @srq: The SRQ to query.
3617 * @srq_attr: The attributes of the specified SRQ.
3618 */
3619int ib_query_srq(struct ib_srq *srq,
3620 struct ib_srq_attr *srq_attr);
3621
3622/**
c4367a26
SR
3623 * ib_destroy_srq_user - Destroys the specified SRQ.
3624 * @srq: The SRQ to destroy.
3625 * @udata: Valid user data or NULL for kernel objects
3626 */
3627int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3628
3629/**
3630 * ib_destroy_srq - Destroys the specified kernel SRQ.
d41fcc67 3631 * @srq: The SRQ to destroy.
c4367a26
SR
3632 *
3633 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
d41fcc67 3634 */
119181d1 3635static inline void ib_destroy_srq(struct ib_srq *srq)
c4367a26 3636{
119181d1
LR
3637 int ret = ib_destroy_srq_user(srq, NULL);
3638
3639 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
c4367a26 3640}
d41fcc67
RD
3641
3642/**
3643 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3644 * @srq: The SRQ to post the work request on.
3645 * @recv_wr: A list of work requests to post on the receive queue.
3646 * @bad_recv_wr: On an immediate failure, this parameter will reference
3647 * the work request that failed to be posted on the QP.
3648 */
3649static inline int ib_post_srq_recv(struct ib_srq *srq,
d34ac5cd
BVA
3650 const struct ib_recv_wr *recv_wr,
3651 const struct ib_recv_wr **bad_recv_wr)
d41fcc67 3652{
d34ac5cd 3653 const struct ib_recv_wr *dummy;
bb039a87 3654
3023a1e9
KH
3655 return srq->device->ops.post_srq_recv(srq, recv_wr,
3656 bad_recv_wr ? : &dummy);
d41fcc67
RD
3657}
3658
b72bfc96
JG
3659struct ib_qp *ib_create_qp(struct ib_pd *pd,
3660 struct ib_qp_init_attr *qp_init_attr);
1da177e4 3661
a512c2fb
PP
3662/**
3663 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3664 * @qp: The QP to modify.
3665 * @attr: On input, specifies the QP attributes to modify. On output,
3666 * the current values of selected QP attributes are returned.
3667 * @attr_mask: A bit-mask used to specify which attributes of the QP
3668 * are being modified.
3669 * @udata: pointer to user's input output buffer information
3670 * are being modified.
3671 * It returns 0 on success and returns appropriate error code on error.
3672 */
3673int ib_modify_qp_with_udata(struct ib_qp *qp,
3674 struct ib_qp_attr *attr,
3675 int attr_mask,
3676 struct ib_udata *udata);
3677
1da177e4
LT
3678/**
3679 * ib_modify_qp - Modifies the attributes for the specified QP and then
3680 * transitions the QP to the given state.
3681 * @qp: The QP to modify.
3682 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3683 * the current values of selected QP attributes are returned.
3684 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3685 * are being modified.
3686 */
3687int ib_modify_qp(struct ib_qp *qp,
3688 struct ib_qp_attr *qp_attr,
3689 int qp_attr_mask);
3690
3691/**
3692 * ib_query_qp - Returns the attribute list and current values for the
3693 * specified QP.
3694 * @qp: The QP to query.
3695 * @qp_attr: The attributes of the specified QP.
3696 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3697 * @qp_init_attr: Additional attributes of the selected QP.
3698 *
3699 * The qp_attr_mask may be used to limit the query to gathering only the
3700 * selected attributes.
3701 */
3702int ib_query_qp(struct ib_qp *qp,
3703 struct ib_qp_attr *qp_attr,
3704 int qp_attr_mask,
3705 struct ib_qp_init_attr *qp_init_attr);
3706
3707/**
3708 * ib_destroy_qp - Destroys the specified QP.
3709 * @qp: The QP to destroy.
c4367a26 3710 * @udata: Valid udata or NULL for kernel objects
1da177e4 3711 */
c4367a26
SR
3712int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3713
3714/**
3715 * ib_destroy_qp - Destroys the specified kernel QP.
3716 * @qp: The QP to destroy.
3717 *
3718 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3719 */
3720static inline int ib_destroy_qp(struct ib_qp *qp)
3721{
3722 return ib_destroy_qp_user(qp, NULL);
3723}
1da177e4 3724
d3d72d90 3725/**
0e0ec7e0
SH
3726 * ib_open_qp - Obtain a reference to an existing sharable QP.
3727 * @xrcd - XRC domain
3728 * @qp_open_attr: Attributes identifying the QP to open.
3729 *
3730 * Returns a reference to a sharable QP.
3731 */
3732struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3733 struct ib_qp_open_attr *qp_open_attr);
3734
3735/**
3736 * ib_close_qp - Release an external reference to a QP.
d3d72d90
SH
3737 * @qp: The QP handle to release
3738 *
0e0ec7e0
SH
3739 * The opened QP handle is released by the caller. The underlying
3740 * shared QP is not destroyed until all internal references are released.
d3d72d90 3741 */
0e0ec7e0 3742int ib_close_qp(struct ib_qp *qp);
d3d72d90 3743
1da177e4
LT
3744/**
3745 * ib_post_send - Posts a list of work requests to the send queue of
3746 * the specified QP.
3747 * @qp: The QP to post the work request on.
3748 * @send_wr: A list of work requests to post on the send queue.
3749 * @bad_send_wr: On an immediate failure, this parameter will reference
3750 * the work request that failed to be posted on the QP.
55464d46
BVA
3751 *
3752 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3753 * error is returned, the QP state shall not be affected,
3754 * ib_post_send() will return an immediate error after queueing any
3755 * earlier work requests in the list.
1da177e4
LT
3756 */
3757static inline int ib_post_send(struct ib_qp *qp,
d34ac5cd
BVA
3758 const struct ib_send_wr *send_wr,
3759 const struct ib_send_wr **bad_send_wr)
1da177e4 3760{
d34ac5cd 3761 const struct ib_send_wr *dummy;
bb039a87 3762
3023a1e9 3763 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
1da177e4
LT
3764}
3765
3766/**
3767 * ib_post_recv - Posts a list of work requests to the receive queue of
3768 * the specified QP.
3769 * @qp: The QP to post the work request on.
3770 * @recv_wr: A list of work requests to post on the receive queue.
3771 * @bad_recv_wr: On an immediate failure, this parameter will reference
3772 * the work request that failed to be posted on the QP.
3773 */
3774static inline int ib_post_recv(struct ib_qp *qp,
d34ac5cd
BVA
3775 const struct ib_recv_wr *recv_wr,
3776 const struct ib_recv_wr **bad_recv_wr)
1da177e4 3777{
d34ac5cd 3778 const struct ib_recv_wr *dummy;
bb039a87 3779
3023a1e9 3780 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
1da177e4
LT
3781}
3782
7e3c66c9
LR
3783struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3784 int comp_vector, enum ib_poll_context poll_ctx,
3785 const char *caller);
c4367a26
SR
3786static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3787 int nr_cqe, int comp_vector,
3788 enum ib_poll_context poll_ctx)
3789{
7e3c66c9
LR
3790 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3791 KBUILD_MODNAME);
c4367a26
SR
3792}
3793
20cf4e02
CL
3794struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3795 int nr_cqe, enum ib_poll_context poll_ctx,
3796 const char *caller);
3797
3798/**
3799 * ib_alloc_cq_any: Allocate kernel CQ
3800 * @dev: The IB device
3801 * @private: Private data attached to the CQE
3802 * @nr_cqe: Number of CQEs in the CQ
3803 * @poll_ctx: Context used for polling the CQ
3804 */
3805static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3806 void *private, int nr_cqe,
3807 enum ib_poll_context poll_ctx)
3808{
3809 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3810 KBUILD_MODNAME);
3811}
3812
7e3c66c9 3813void ib_free_cq(struct ib_cq *cq);
14d3a3b2
CH
3814int ib_process_cq_direct(struct ib_cq *cq, int budget);
3815
1da177e4
LT
3816/**
3817 * ib_create_cq - Creates a CQ on the specified device.
3818 * @device: The device on which to create the CQ.
3819 * @comp_handler: A user-specified callback that is invoked when a
3820 * completion event occurs on the CQ.
3821 * @event_handler: A user-specified callback that is invoked when an
3822 * asynchronous event not associated with a completion occurs on the CQ.
3823 * @cq_context: Context associated with the CQ returned to the user via
3824 * the associated completion and event handlers.
8e37210b 3825 * @cq_attr: The attributes the CQ should be created upon.
1da177e4
LT
3826 *
3827 * Users can examine the cq structure to determine the actual CQ size.
3828 */
7350cdd0
BP
3829struct ib_cq *__ib_create_cq(struct ib_device *device,
3830 ib_comp_handler comp_handler,
3831 void (*event_handler)(struct ib_event *, void *),
3832 void *cq_context,
3833 const struct ib_cq_init_attr *cq_attr,
3834 const char *caller);
3835#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3836 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
1da177e4
LT
3837
3838/**
3839 * ib_resize_cq - Modifies the capacity of the CQ.
3840 * @cq: The CQ to resize.
3841 * @cqe: The minimum size of the CQ.
3842 *
3843 * Users can examine the cq structure to determine the actual CQ size.
3844 */
3845int ib_resize_cq(struct ib_cq *cq, int cqe);
3846
2dd57162 3847/**
4190b4e9 3848 * rdma_set_cq_moderation - Modifies moderation params of the CQ
2dd57162
EC
3849 * @cq: The CQ to modify.
3850 * @cq_count: number of CQEs that will trigger an event
3851 * @cq_period: max period of time in usec before triggering an event
3852 *
3853 */
4190b4e9 3854int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2dd57162 3855
1da177e4 3856/**
c4367a26 3857 * ib_destroy_cq_user - Destroys the specified CQ.
1da177e4 3858 * @cq: The CQ to destroy.
c4367a26 3859 * @udata: Valid user data or NULL for kernel objects
1da177e4 3860 */
c4367a26
SR
3861int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3862
3863/**
3864 * ib_destroy_cq - Destroys the specified kernel CQ.
3865 * @cq: The CQ to destroy.
3866 *
3867 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3868 */
890ac8d9 3869static inline void ib_destroy_cq(struct ib_cq *cq)
c4367a26 3870{
43d781b9
LR
3871 int ret = ib_destroy_cq_user(cq, NULL);
3872
3873 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
c4367a26 3874}
1da177e4
LT
3875
3876/**
3877 * ib_poll_cq - poll a CQ for completion(s)
3878 * @cq:the CQ being polled
3879 * @num_entries:maximum number of completions to return
3880 * @wc:array of at least @num_entries &struct ib_wc where completions
3881 * will be returned
3882 *
3883 * Poll a CQ for (possibly multiple) completions. If the return value
3884 * is < 0, an error occurred. If the return value is >= 0, it is the
3885 * number of completions returned. If the return value is
3886 * non-negative and < num_entries, then the CQ was emptied.
3887 */
3888static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3889 struct ib_wc *wc)
3890{
3023a1e9 3891 return cq->device->ops.poll_cq(cq, num_entries, wc);
1da177e4
LT
3892}
3893
1da177e4
LT
3894/**
3895 * ib_req_notify_cq - Request completion notification on a CQ.
3896 * @cq: The CQ to generate an event for.
ed23a727
RD
3897 * @flags:
3898 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3899 * to request an event on the next solicited event or next work
3900 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3901 * may also be |ed in to request a hint about missed events, as
3902 * described below.
3903 *
3904 * Return Value:
3905 * < 0 means an error occurred while requesting notification
3906 * == 0 means notification was requested successfully, and if
3907 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3908 * were missed and it is safe to wait for another event. In
3909 * this case is it guaranteed that any work completions added
3910 * to the CQ since the last CQ poll will trigger a completion
3911 * notification event.
3912 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3913 * in. It means that the consumer must poll the CQ again to
3914 * make sure it is empty to avoid missing an event because of a
3915 * race between requesting notification and an entry being
3916 * added to the CQ. This return value means it is possible
3917 * (but not guaranteed) that a work completion has been added
3918 * to the CQ since the last poll without triggering a
3919 * completion notification event.
1da177e4
LT
3920 */
3921static inline int ib_req_notify_cq(struct ib_cq *cq,
ed23a727 3922 enum ib_cq_notify_flags flags)
1da177e4 3923{
3023a1e9 3924 return cq->device->ops.req_notify_cq(cq, flags);
1da177e4
LT
3925}
3926
c7ff819a
YF
3927struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3928 int comp_vector_hint,
3929 enum ib_poll_context poll_ctx);
3930
3931void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3932
1da177e4
LT
3933/**
3934 * ib_req_ncomp_notif - Request completion notification when there are
3935 * at least the specified number of unreaped completions on the CQ.
3936 * @cq: The CQ to generate an event for.
3937 * @wc_cnt: The number of unreaped completions that should be on the
3938 * CQ before an event is generated.
3939 */
3940static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3941{
3023a1e9
KH
3942 return cq->device->ops.req_ncomp_notif ?
3943 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
1da177e4
LT
3944 -ENOSYS;
3945}
3946
9b513090
RC
3947/**
3948 * ib_dma_mapping_error - check a DMA addr for error
3949 * @dev: The device for which the dma_addr was created
3950 * @dma_addr: The DMA address to check
3951 */
3952static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3953{
0957c29f 3954 return dma_mapping_error(dev->dma_device, dma_addr);
9b513090
RC
3955}
3956
3957/**
3958 * ib_dma_map_single - Map a kernel virtual address to DMA address
3959 * @dev: The device for which the dma_addr is to be created
3960 * @cpu_addr: The kernel virtual address
3961 * @size: The size of the region in bytes
3962 * @direction: The direction of the DMA
3963 */
3964static inline u64 ib_dma_map_single(struct ib_device *dev,
3965 void *cpu_addr, size_t size,
3966 enum dma_data_direction direction)
3967{
0957c29f 3968 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
9b513090
RC
3969}
3970
3971/**
3972 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3973 * @dev: The device for which the DMA address was created
3974 * @addr: The DMA address
3975 * @size: The size of the region in bytes
3976 * @direction: The direction of the DMA
3977 */
3978static inline void ib_dma_unmap_single(struct ib_device *dev,
3979 u64 addr, size_t size,
3980 enum dma_data_direction direction)
3981{
0957c29f 3982 dma_unmap_single(dev->dma_device, addr, size, direction);
cb9fbc5c
AK
3983}
3984
9b513090
RC
3985/**
3986 * ib_dma_map_page - Map a physical page to DMA address
3987 * @dev: The device for which the dma_addr is to be created
3988 * @page: The page to be mapped
3989 * @offset: The offset within the page
3990 * @size: The size of the region in bytes
3991 * @direction: The direction of the DMA
3992 */
3993static inline u64 ib_dma_map_page(struct ib_device *dev,
3994 struct page *page,
3995 unsigned long offset,
3996 size_t size,
3997 enum dma_data_direction direction)
3998{
0957c29f 3999 return dma_map_page(dev->dma_device, page, offset, size, direction);
9b513090
RC
4000}
4001
4002/**
4003 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4004 * @dev: The device for which the DMA address was created
4005 * @addr: The DMA address
4006 * @size: The size of the region in bytes
4007 * @direction: The direction of the DMA
4008 */
4009static inline void ib_dma_unmap_page(struct ib_device *dev,
4010 u64 addr, size_t size,
4011 enum dma_data_direction direction)
4012{
0957c29f 4013 dma_unmap_page(dev->dma_device, addr, size, direction);
9b513090
RC
4014}
4015
4016/**
4017 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4018 * @dev: The device for which the DMA addresses are to be created
4019 * @sg: The array of scatter/gather entries
4020 * @nents: The number of scatter/gather entries
4021 * @direction: The direction of the DMA
4022 */
4023static inline int ib_dma_map_sg(struct ib_device *dev,
4024 struct scatterlist *sg, int nents,
4025 enum dma_data_direction direction)
4026{
0957c29f 4027 return dma_map_sg(dev->dma_device, sg, nents, direction);
9b513090
RC
4028}
4029
4030/**
4031 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4032 * @dev: The device for which the DMA addresses were created
4033 * @sg: The array of scatter/gather entries
4034 * @nents: The number of scatter/gather entries
4035 * @direction: The direction of the DMA
4036 */
4037static inline void ib_dma_unmap_sg(struct ib_device *dev,
4038 struct scatterlist *sg, int nents,
4039 enum dma_data_direction direction)
4040{
0957c29f 4041 dma_unmap_sg(dev->dma_device, sg, nents, direction);
9b513090
RC
4042}
4043
cb9fbc5c
AK
4044static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4045 struct scatterlist *sg, int nents,
4046 enum dma_data_direction direction,
00085f1e 4047 unsigned long dma_attrs)
cb9fbc5c 4048{
0957c29f
BVA
4049 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4050 dma_attrs);
cb9fbc5c
AK
4051}
4052
4053static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4054 struct scatterlist *sg, int nents,
4055 enum dma_data_direction direction,
00085f1e 4056 unsigned long dma_attrs)
cb9fbc5c 4057{
0957c29f 4058 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
cb9fbc5c 4059}
9b513090 4060
0b5cb330
BVA
4061/**
4062 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4063 * @dev: The device to query
4064 *
4065 * The returned value represents a size in bytes.
4066 */
4067static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4068{
ecdfdfdb 4069 return dma_get_max_seg_size(dev->dma_device);
0b5cb330
BVA
4070}
4071
9b513090
RC
4072/**
4073 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4074 * @dev: The device for which the DMA address was created
4075 * @addr: The DMA address
4076 * @size: The size of the region in bytes
4077 * @dir: The direction of the DMA
4078 */
4079static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4080 u64 addr,
4081 size_t size,
4082 enum dma_data_direction dir)
4083{
0957c29f 4084 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
9b513090
RC
4085}
4086
4087/**
4088 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4089 * @dev: The device for which the DMA address was created
4090 * @addr: The DMA address
4091 * @size: The size of the region in bytes
4092 * @dir: The direction of the DMA
4093 */
4094static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4095 u64 addr,
4096 size_t size,
4097 enum dma_data_direction dir)
4098{
0957c29f 4099 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
9b513090
RC
4100}
4101
4102/**
4103 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4104 * @dev: The device for which the DMA address is requested
4105 * @size: The size of the region to allocate in bytes
4106 * @dma_handle: A pointer for returning the DMA address of the region
4107 * @flag: memory allocator flags
4108 */
4109static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4110 size_t size,
d43dbacf 4111 dma_addr_t *dma_handle,
9b513090
RC
4112 gfp_t flag)
4113{
0957c29f 4114 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
9b513090
RC
4115}
4116
4117/**
4118 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4119 * @dev: The device for which the DMA addresses were allocated
4120 * @size: The size of the region
4121 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4122 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4123 */
4124static inline void ib_dma_free_coherent(struct ib_device *dev,
4125 size_t size, void *cpu_addr,
d43dbacf 4126 dma_addr_t dma_handle)
9b513090 4127{
0957c29f 4128 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
9b513090
RC
4129}
4130
33006bd4
MS
4131/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4132 * space. This function should be called when 'current' is the owning MM.
4133 */
4134struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4135 u64 virt_addr, int mr_access_flags);
4136
87d8069f
MS
4137/* ib_advise_mr - give an advice about an address range in a memory region */
4138int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4139 u32 flags, struct ib_sge *sg_list, u32 num_sge);
1da177e4 4140/**
c4367a26
SR
4141 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4142 * HCA translation table.
4143 * @mr: The memory region to deregister.
4144 * @udata: Valid user data or NULL for kernel object
4145 *
4146 * This function can fail, if the memory region has memory windows bound to it.
4147 */
4148int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4149
4150/**
4151 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
1da177e4
LT
4152 * HCA translation table.
4153 * @mr: The memory region to deregister.
7083e42e
SM
4154 *
4155 * This function can fail, if the memory region has memory windows bound to it.
c4367a26
SR
4156 *
4157 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
1da177e4 4158 */
c4367a26
SR
4159static inline int ib_dereg_mr(struct ib_mr *mr)
4160{
4161 return ib_dereg_mr_user(mr, NULL);
4162}
4163
b64b74b1
GP
4164struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4165 u32 max_num_sg);
00f7ec36 4166
26bc7eae
IR
4167struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4168 u32 max_num_data_sg,
4169 u32 max_num_meta_sg);
4170
00f7ec36
SW
4171/**
4172 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4173 * R_Key and L_Key.
4174 * @mr - struct ib_mr pointer to be updated.
4175 * @newkey - new key to be used.
4176 */
4177static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4178{
4179 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4180 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4181}
4182
7083e42e
SM
4183/**
4184 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4185 * for calculating a new rkey for type 2 memory windows.
4186 * @rkey - the rkey to increment.
4187 */
4188static inline u32 ib_inc_rkey(u32 rkey)
4189{
4190 const u32 mask = 0x000000ff;
4191 return ((rkey + 1) & mask) | (rkey & ~mask);
4192}
4193
1da177e4
LT
4194/**
4195 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4196 * @qp: QP to attach to the multicast group. The QP must be type
4197 * IB_QPT_UD.
4198 * @gid: Multicast group GID.
4199 * @lid: Multicast group LID in host byte order.
4200 *
4201 * In order to send and receive multicast packets, subnet
4202 * administration must have created the multicast group and configured
4203 * the fabric appropriately. The port associated with the specified
4204 * QP must also be a member of the multicast group.
4205 */
4206int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4207
4208/**
4209 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4210 * @qp: QP to detach from the multicast group.
4211 * @gid: Multicast group GID.
4212 * @lid: Multicast group LID in host byte order.
4213 */
4214int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4215
b73efcb2
MG
4216struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4217 struct inode *inode, struct ib_udata *udata);
4218int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
59991f94 4219
1c636f80
EC
4220static inline int ib_check_mr_access(int flags)
4221{
4222 /*
4223 * Local write permission is required if remote write or
4224 * remote atomic permission is also requested.
4225 */
4226 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4227 !(flags & IB_ACCESS_LOCAL_WRITE))
4228 return -EINVAL;
4229
ca95c141
MG
4230 if (flags & ~IB_ACCESS_SUPPORTED)
4231 return -EINVAL;
4232
1c636f80
EC
4233 return 0;
4234}
4235
08bb558a
JM
4236static inline bool ib_access_writable(int access_flags)
4237{
4238 /*
4239 * We have writable memory backing the MR if any of the following
4240 * access flags are set. "Local write" and "remote write" obviously
4241 * require write access. "Remote atomic" can do things like fetch and
4242 * add, which will modify memory, and "MW bind" can change permissions
4243 * by binding a window.
4244 */
4245 return access_flags &
4246 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4247 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4248}
4249
1b01d335
SG
4250/**
4251 * ib_check_mr_status: lightweight check of MR status.
4252 * This routine may provide status checks on a selected
4253 * ib_mr. first use is for signature status check.
4254 *
4255 * @mr: A memory region.
4256 * @check_mask: Bitmask of which checks to perform from
4257 * ib_mr_status_check enumeration.
4258 * @mr_status: The container of relevant status checks.
4259 * failed checks will be indicated in the status bitmask
4260 * and the relevant info shall be in the error item.
4261 */
4262int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4263 struct ib_mr_status *mr_status);
4264
d79af724
JG
4265/**
4266 * ib_device_try_get: Hold a registration lock
4267 * device: The device to lock
4268 *
4269 * A device under an active registration lock cannot become unregistered. It
4270 * is only possible to obtain a registration lock on a device that is fully
4271 * registered, otherwise this function returns false.
4272 *
4273 * The registration lock is only necessary for actions which require the
4274 * device to still be registered. Uses that only require the device pointer to
4275 * be valid should use get_device(&ibdev->dev) to hold the memory.
4276 *
4277 */
4278static inline bool ib_device_try_get(struct ib_device *dev)
4279{
4280 return refcount_inc_not_zero(&dev->refcount);
4281}
4282
4283void ib_device_put(struct ib_device *device);
324e227e
JG
4284struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4285 enum rdma_driver_id driver_id);
4286struct ib_device *ib_device_get_by_name(const char *name,
4287 enum rdma_driver_id driver_id);
9268f72d
YK
4288struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4289 u16 pkey, const union ib_gid *gid,
4290 const struct sockaddr *addr);
c2261dd7
JG
4291int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4292 unsigned int port);
4293struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4294
5fd251c8
YH
4295struct ib_wq *ib_create_wq(struct ib_pd *pd,
4296 struct ib_wq_init_attr *init_attr);
add53535 4297int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
5fd251c8
YH
4298int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4299 u32 wq_attr_mask);
6d39786b 4300int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
9268f72d 4301
ff2ba993 4302int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
9aa8b321 4303 unsigned int *sg_offset, unsigned int page_size);
2cdfcdd8
MG
4304int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4305 int data_sg_nents, unsigned int *data_sg_offset,
4306 struct scatterlist *meta_sg, int meta_sg_nents,
4307 unsigned int *meta_sg_offset, unsigned int page_size);
4c67e2bf
SG
4308
4309static inline int
ff2ba993 4310ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
9aa8b321 4311 unsigned int *sg_offset, unsigned int page_size)
4c67e2bf
SG
4312{
4313 int n;
4314
ff2ba993 4315 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4c67e2bf
SG
4316 mr->iova = 0;
4317
4318 return n;
4319}
4320
ff2ba993 4321int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
9aa8b321 4322 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4c67e2bf 4323
765d6774
SW
4324void ib_drain_rq(struct ib_qp *qp);
4325void ib_drain_sq(struct ib_qp *qp);
4326void ib_drain_qp(struct ib_qp *qp);
850d8fd7 4327
d4186194 4328int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
2224c47a
DC
4329
4330static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4331{
44c58487
DC
4332 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4333 return attr->roce.dmac;
4334 return NULL;
2224c47a
DC
4335}
4336
64b4646e 4337static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
2224c47a 4338{
44c58487 4339 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
64b4646e
DC
4340 attr->ib.dlid = (u16)dlid;
4341 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4342 attr->opa.dlid = dlid;
2224c47a
DC
4343}
4344
64b4646e 4345static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
2224c47a 4346{
44c58487
DC
4347 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4348 return attr->ib.dlid;
64b4646e
DC
4349 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4350 return attr->opa.dlid;
44c58487 4351 return 0;
2224c47a
DC
4352}
4353
4354static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4355{
4356 attr->sl = sl;
4357}
4358
4359static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4360{
4361 return attr->sl;
4362}
4363
4364static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4365 u8 src_path_bits)
4366{
44c58487
DC
4367 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4368 attr->ib.src_path_bits = src_path_bits;
64b4646e
DC
4369 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4370 attr->opa.src_path_bits = src_path_bits;
2224c47a
DC
4371}
4372
4373static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4374{
44c58487
DC
4375 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4376 return attr->ib.src_path_bits;
64b4646e
DC
4377 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4378 return attr->opa.src_path_bits;
44c58487 4379 return 0;
2224c47a
DC
4380}
4381
d98bb7f7
DH
4382static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4383 bool make_grd)
4384{
4385 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4386 attr->opa.make_grd = make_grd;
4387}
4388
4389static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4390{
4391 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4392 return attr->opa.make_grd;
4393 return false;
4394}
4395
2224c47a
DC
4396static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4397{
4398 attr->port_num = port_num;
4399}
4400
4401static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4402{
4403 return attr->port_num;
4404}
4405
4406static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4407 u8 static_rate)
4408{
4409 attr->static_rate = static_rate;
4410}
4411
4412static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4413{
4414 return attr->static_rate;
4415}
4416
4417static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4418 enum ib_ah_flags flag)
4419{
4420 attr->ah_flags = flag;
4421}
4422
4423static inline enum ib_ah_flags
4424 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4425{
4426 return attr->ah_flags;
4427}
4428
4429static inline const struct ib_global_route
4430 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4431{
4432 return &attr->grh;
4433}
4434
4435/*To retrieve and modify the grh */
4436static inline struct ib_global_route
4437 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4438{
4439 return &attr->grh;
4440}
4441
4442static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4443{
4444 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4445
4446 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4447}
4448
4449static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4450 __be64 prefix)
4451{
4452 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4453
4454 grh->dgid.global.subnet_prefix = prefix;
4455}
4456
4457static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4458 __be64 if_id)
4459{
4460 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4461
4462 grh->dgid.global.interface_id = if_id;
4463}
4464
4465static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4466 union ib_gid *dgid, u32 flow_label,
4467 u8 sgid_index, u8 hop_limit,
4468 u8 traffic_class)
4469{
4470 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4471
4472 attr->ah_flags = IB_AH_GRH;
4473 if (dgid)
4474 grh->dgid = *dgid;
4475 grh->flow_label = flow_label;
4476 grh->sgid_index = sgid_index;
4477 grh->hop_limit = hop_limit;
4478 grh->traffic_class = traffic_class;
8d9ec9ad 4479 grh->sgid_attr = NULL;
2224c47a 4480}
44c58487 4481
8d9ec9ad
JG
4482void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4483void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4484 u32 flow_label, u8 hop_limit, u8 traffic_class,
4485 const struct ib_gid_attr *sgid_attr);
d97099fe
JG
4486void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4487 const struct rdma_ah_attr *src);
4488void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4489 const struct rdma_ah_attr *new);
4490void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
8d9ec9ad 4491
87daac68
DH
4492/**
4493 * rdma_ah_find_type - Return address handle type.
4494 *
4495 * @dev: Device to be checked
4496 * @port_num: Port number
4497 */
44c58487 4498static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
87daac68 4499 u8 port_num)
44c58487 4500{
a6532e71 4501 if (rdma_protocol_roce(dev, port_num))
44c58487 4502 return RDMA_AH_ATTR_TYPE_ROCE;
87daac68
DH
4503 if (rdma_protocol_ib(dev, port_num)) {
4504 if (rdma_cap_opa_ah(dev, port_num))
4505 return RDMA_AH_ATTR_TYPE_OPA;
44c58487 4506 return RDMA_AH_ATTR_TYPE_IB;
87daac68
DH
4507 }
4508
4509 return RDMA_AH_ATTR_TYPE_UNDEFINED;
44c58487 4510}
7db20ecd 4511
62ede777
HD
4512/**
4513 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4514 * In the current implementation the only way to get
4515 * get the 32bit lid is from other sources for OPA.
4516 * For IB, lids will always be 16bits so cast the
4517 * value accordingly.
4518 *
4519 * @lid: A 32bit LID
4520 */
4521static inline u16 ib_lid_cpu16(u32 lid)
7db20ecd 4522{
62ede777
HD
4523 WARN_ON_ONCE(lid & 0xFFFF0000);
4524 return (u16)lid;
7db20ecd
HD
4525}
4526
62ede777
HD
4527/**
4528 * ib_lid_be16 - Return lid in 16bit BE encoding.
4529 *
4530 * @lid: A 32bit LID
4531 */
4532static inline __be16 ib_lid_be16(u32 lid)
7db20ecd 4533{
62ede777
HD
4534 WARN_ON_ONCE(lid & 0xFFFF0000);
4535 return cpu_to_be16((u16)lid);
7db20ecd 4536}
32043830 4537
c66cd353
SG
4538/**
4539 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4540 * vector
4541 * @device: the rdma device
4542 * @comp_vector: index of completion vector
4543 *
4544 * Returns NULL on failure, otherwise a corresponding cpu map of the
4545 * completion vector (returns all-cpus map if the device driver doesn't
4546 * implement get_vector_affinity).
4547 */
4548static inline const struct cpumask *
4549ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4550{
4551 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
3023a1e9 4552 !device->ops.get_vector_affinity)
c66cd353
SG
4553 return NULL;
4554
3023a1e9 4555 return device->ops.get_vector_affinity(device, comp_vector);
c66cd353
SG
4556
4557}
4558
32f69e4b
DJ
4559/**
4560 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4561 * and add their gids, as needed, to the relevant RoCE devices.
4562 *
4563 * @device: the rdma device
4564 */
4565void rdma_roce_rescan_device(struct ib_device *ibdev);
4566
8313c10f 4567struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
7dc08dcf 4568
15a1b4be 4569int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
f6a8a19b
DD
4570
4571struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4572 enum rdma_netdev_t type, const char *name,
4573 unsigned char name_assign_type,
4574 void (*setup)(struct net_device *));
5d6b0cb3
DD
4575
4576int rdma_init_netdev(struct ib_device *device, u8 port_num,
4577 enum rdma_netdev_t type, const char *name,
4578 unsigned char name_assign_type,
4579 void (*setup)(struct net_device *),
4580 struct net_device *netdev);
4581
d4122f5a
PP
4582/**
4583 * rdma_set_device_sysfs_group - Set device attributes group to have
4584 * driver specific sysfs entries at
4585 * for infiniband class.
4586 *
4587 * @device: device pointer for which attributes to be created
4588 * @group: Pointer to group which should be added when device
4589 * is registered with sysfs.
4590 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4591 * group per device to have sysfs attributes.
4592 *
4593 * NOTE: New drivers should not make use of this API; instead new device
4594 * parameter should be exposed via netlink command. This API and mechanism
4595 * exist only for existing drivers.
4596 */
4597static inline void
4598rdma_set_device_sysfs_group(struct ib_device *dev,
4599 const struct attribute_group *group)
4600{
4601 dev->groups[1] = group;
4602}
4603
54747231
PP
4604/**
4605 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4606 *
4607 * @device: device pointer for which ib_device pointer to retrieve
4608 *
4609 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4610 *
4611 */
4612static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4613{
cebe556b
PP
4614 struct ib_core_device *coredev =
4615 container_of(device, struct ib_core_device, dev);
4616
4617 return coredev->owner;
54747231
PP
4618}
4619
4620/**
4621 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4622 * ib_device holder structure from device pointer.
4623 *
4624 * NOTE: New drivers should not make use of this API; This API is only for
4625 * existing drivers who have exposed sysfs entries using
4626 * rdma_set_device_sysfs_group().
4627 */
4628#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4629 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
41c61401
PP
4630
4631bool rdma_dev_access_netns(const struct ib_device *device,
4632 const struct net *net);
d5665a21
MZ
4633
4634#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
074bf2c2 4635#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
d5665a21
MZ
4636#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4637
4638/**
4639 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4640 * on the flow_label
4641 *
4642 * This function will convert the 20 bit flow_label input to a valid RoCE v2
4643 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
4644 * convention.
4645 */
4646static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4647{
4648 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4649
4650 fl_low ^= fl_high >> 14;
4651 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4652}
4653
4654/**
4655 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4656 * local and remote qpn values
4657 *
4658 * This function folded the multiplication results of two qpns, 24 bit each,
4659 * fields, and converts it to a 20 bit results.
4660 *
4661 * This function will create symmetric flow_label value based on the local
4662 * and remote qpn values. this will allow both the requester and responder
4663 * to calculate the same flow_label for a given connection.
4664 *
4665 * This helper function should be used by driver in case the upper layer
4666 * provide a zero flow_label value. This is to improve entropy of RDMA
4667 * traffic in the network.
4668 */
4669static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4670{
4671 u64 v = (u64)lqpn * rqpn;
4672
4673 v ^= v >> 20;
4674 v ^= v >> 40;
4675
4676 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4677}
1da177e4 4678#endif /* IB_VERBS_H */