/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
	enum ib_gid_type gid_type;
	struct net_device *ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	IB_DEVICE_INIT_TYPE = (1 << 9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory. Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec. iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with a single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC = 1,
	IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
	IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};

struct ib_cq_init_attr {
	unsigned int cqe;
	int comp_vector;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in KHZ */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256: return 256;
	case IB_MTU_512: return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X: return 1;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again. Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are. If @names is
 *   shorter than this number, a kernel oops will result. Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
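
/*
 * Example (an illustrative sketch only, not part of this API; the "foo"
 * names are hypothetical): a driver might combine the BUILD_BUG_ON()
 * suggested in the struct rdma_hw_stats documentation above with this
 * helper in its alloc_hw_stats callback.
 *
 *	static const char * const foo_counter_names[] = {
 *		"foo_rx_pkts",
 *		"foo_tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *dev,
 *							u8 port_num)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < 2);
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *					ARRAY_SIZE(foo_counter_names),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */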


/* Define bits for the various functionality this port needs the core
 * to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB	\
				| RDMA_CORE_CAP_IB_MAD	\
				| RDMA_CORE_CAP_IB_SMI	\
				| RDMA_CORE_CAP_IB_CM	\
				| RDMA_CORE_CAP_IB_SA	\
				| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	(RDMA_CORE_CAP_PROT_ROCE \
				| RDMA_CORE_CAP_IB_MAD	\
				| RDMA_CORE_CAP_IB_CM	\
				| RDMA_CORE_CAP_AF_IB	\
				| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
				(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
				| RDMA_CORE_CAP_IB_MAD	\
				| RDMA_CORE_CAP_IB_CM	\
				| RDMA_CORE_CAP_AF_IB	\
				| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	(RDMA_CORE_CAP_PROT_IWARP \
				| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB	\
				| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

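/*
 * Illustrative sketch (assumption: @device has its port_immutable array,
 * defined later in this file and indexed by port number, already set up):
 * the core tests these bits to decide what a port supports.
 *
 *	static bool port_is_roce(struct ib_device *device, u8 port_num)
 *	{
 *		return device->port_immutable[port_num].core_cap_flags &
 *		       RDMA_CORE_CAP_PROT_ROCE;
 *	}
 */
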
struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u16 lid;
	u16 sm_lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	bool grh_required;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device  = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)
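
/*
 * Typical usage sketch (the handler below is hypothetical;
 * ib_register_event_handler() is declared further down this header):
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event %d\n", event->event);
 *	}
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */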

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the GRH.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for
 *                        normal registration
 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
 *                        signature operations (data-integrity
 *                        capable regions)
 * @IB_MR_TYPE_SG_GAPS:   memory region that is capable of registering
 *                        arbitrary sg lists (without the normal mr
 *                        constraints - see ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16 pi_interval;
	u16 bg;
	u16 app_tag;
	u32 ref_tag;
	bool ref_remap;
	bool app_escape;
	bool ref_escape;
	u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8 check_mask;
	struct ib_sig_domain mem;
	struct ib_sig_domain wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type err_type;
	u32 expected;
	u32 actual;
	u64 sig_err_offset;
	u32 key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
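
/*
 * Per the comment above, a consumer can classify completions without
 * enumerating every receive opcode (sketch; the handlers are
 * hypothetical):
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv_completion(wc);
 *	else
 *		handle_send_completion(wc);
 */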

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	int wc_flags;
	u16 pkey_index;
	u16 slid;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num; /* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right number of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	enum ib_qp_create_flags create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
};
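
/*
 * Minimal sketch of filling this structure (all values hypothetical;
 * ib_create_qp() is declared later in this header):
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */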

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
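
/*
 * Usage sketch for the container pattern above (values hypothetical;
 * ib_post_send() is declared later in this header). Extended WRs embed
 * struct ib_send_wr as their first member, so a driver handed the base
 * pointer recovers the container with rdma_wr():
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_addr,
 *		.rkey        = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */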

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index; /* valid for GSI only */
	u8 port_num; /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr wr;
	struct ib_sig_attrs *sig_attrs;
	struct ib_mr *sig_mr;
	int access_flags;
	struct ib_sge *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = 1,
	IB_ACCESS_REMOTE_WRITE = (1<<1),
	IB_ACCESS_REMOTE_READ = (1<<2),
	IB_ACCESS_REMOTE_ATOMIC = (1<<3),
	IB_ACCESS_MW_BIND = (1<<4),
	IB_ZERO_BASED = (1<<5),
	IB_ACCESS_ON_DEMAND = (1<<6),
	IB_ACCESS_HUGETLB = (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/* Userspace requested uobject deletion. Call could fail */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* Context is being cleaned-up, but commit was just completed */
	RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg; /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	int closing;

	/* locking the uobjects_list */
	struct mutex uobjects_lock;
	struct list_head uobjects;
	/* protects cleanup process from other actions */
	struct rw_semaphore cleanup_rwsem;
	enum rdma_remove_reason cleanup_reason;

	struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root umem_tree;
	/*
	 * Protects .umem_tree, as well as odp_mrs_count and
	 * mmu notifiers registration.
	 */
	struct rw_semaphore umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier mn;
	atomic_t notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head no_private_counters;
	int odp_mrs_count;
#endif

	struct ib_rdmacg_object cg_obj;
};

struct ib_uobject {
	u64 user_handle; /* handle given to us by userspace */
	struct ib_ucontext *context; /* associated user context */
	void *object; /* containing object */
	struct list_head list; /* link to context's list */
	struct ib_rdmacg_object cg_obj; /* rdmacg object */
	int id; /* index into kernel idr */
	struct kref ref;
	atomic_t usecnt; /* protects exclusive access */
	struct rcu_head rcu; /* kfree_rcu() overhead */

	const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
	struct ib_uobject uobj;
	/* ufile contains the lock between context release and file close */
	struct ib_uverbs_file *ufile;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt; /* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
};

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt; /* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,    /* caller context, no hw completions */
	IB_POLL_SOFTIRQ,   /* poll from softirq context */
	IB_POLL_WORKQUEUE, /* poll from workqueue */
};
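
/*
 * Sketch (hypothetical values; ib_alloc_cq() is declared later in this
 * header): the poll context chosen at allocation determines where this
 * CQ's completions are processed.
 *
 *	struct ib_cq *cq = ib_alloc_cq(device, NULL, nr_cqe, comp_vector,
 *				       IB_POLL_SOFTIRQ);
 */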

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt; /* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
			u32 srq_num;
		} xrc;
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
	IB_WQ_FLAGS = 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags; /* Use enum ib_wq_flags */
	u32 flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles. Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge: Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u32 length;
	unsigned int page_size;
	bool need_inval;
	union {
		struct ib_uobject *uobject; /* user */
		struct list_head qp_entry; /* FR */
	};
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_INNER = 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP = 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val holds the VNI value.
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type type;
	u16 size;
	u32 tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type type;
	u16 size;
};

union ib_flow_spec {
	struct {
		u32 type;
		u16 size;
	};
	struct ib_flow_spec_eth eth;
	struct ib_flow_spec_ib ib;
	struct ib_flow_spec_ipv4 ipv4;
	struct ib_flow_spec_tcp_udp tcp_udp;
	struct ib_flow_spec_ipv6 ipv6;
	struct ib_flow_spec_tunnel tunnel;
	struct ib_flow_spec_action_tag flow_tag;
	struct ib_flow_spec_action_drop drop;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16 size;
	u16 priority;
	u32 flags;
	u8 num_of_specs;
	u8 port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
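
/*
 * Sketch of the layout described above (hypothetical wrapper struct;
 * ib_create_flow() is declared later in this header): the optional
 * ib_flow_spec_xxx layers follow struct ib_flow_attr contiguously in
 * memory.
 *
 *	struct {
 *		struct ib_flow_attr     attr;
 *		struct ib_flow_spec_eth eth;
 *	} flow = {
 *		.attr = {
 *			.type         = IB_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(flow),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ib_flow_spec_eth),
 *		},
 *	};
 *
 *	struct ib_flow *f = ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
 */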

struct ib_flow {
	struct ib_qp *qp;
	struct ib_uobject *uobject;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY = 1 << 1,   /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64 subnet_prefix;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table *gid;
	u8 lmc;
	enum ib_port_state port_state;
};

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache *ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int pkey_tbl_len;
	int gid_tbl_len;
	u32 core_cap_flags;
	u32 max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void *clnt_priv;
	struct ib_device *hca;
	u8 port_num;

	/* cleanup function must be specified */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct ib_port_pkey_list {
	/* Lock to hold while modifying the list. */
	spinlock_t list_lock;
	struct list_head pkey_list;
};

struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	spinlock_t client_data_lock;
	struct list_head core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore */
	struct list_head client_data_list;

	struct ib_cache cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable *port_immutable;

	int num_comp_vectors;

	struct ib_port_pkey_list *port_pkey_list;

	struct iw_cm_verbs *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 * driver initialized data. The struct is kfree()'ed by the sysfs
	 * core when the device is removed. A lifespan of -1 in the return
	 * struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats,
			    u8 port, int index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u8 port_num);
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should
	 * add the gid of device @device at gid index @index of
	 * port @port_num to be @gid. Meta-info of that gid (for example,
	 * the network device related to this gid) is available
	 * at @attr. @context allows the HW vendor driver to store extra
	 * information together with a GID entry. The HW vendor may allocate
	 * memory to contain this information and store it in @context when a
	 * new GID entry is written to. Params are consistent until the next
	 * call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called
	 * when roce_gid_table is used.
	 */
	int (*add_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       const union ib_gid *gid,
		       const struct ib_gid_attr *attr,
		       void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       void **context);
2092 int (*query_pkey)(struct ib_device *device,
2093 u8 port_num, u16 index, u16 *pkey);
2094 int (*modify_device)(struct ib_device *device,
2095 int device_modify_mask,
2096 struct ib_device_modify *device_modify);
2097 int (*modify_port)(struct ib_device *device,
2098 u8 port_num, int port_modify_mask,
2099 struct ib_port_modify *port_modify);
2100 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
2101 struct ib_udata *udata);
2102 int (*dealloc_ucontext)(struct ib_ucontext *context);
2103 int (*mmap)(struct ib_ucontext *context,
2104 struct vm_area_struct *vma);
2105 struct ib_pd * (*alloc_pd)(struct ib_device *device,
2106 struct ib_ucontext *context,
2107 struct ib_udata *udata);
2108 int (*dealloc_pd)(struct ib_pd *pd);
2109 struct ib_ah * (*create_ah)(struct ib_pd *pd,
2110 struct rdma_ah_attr *ah_attr,
2111 struct ib_udata *udata);
2112 int (*modify_ah)(struct ib_ah *ah,
2113 struct rdma_ah_attr *ah_attr);
2114 int (*query_ah)(struct ib_ah *ah,
2115 struct rdma_ah_attr *ah_attr);
2116 int (*destroy_ah)(struct ib_ah *ah);
2117 struct ib_srq * (*create_srq)(struct ib_pd *pd,
2118 struct ib_srq_init_attr *srq_init_attr,
2119 struct ib_udata *udata);
2120 int (*modify_srq)(struct ib_srq *srq,
2121 struct ib_srq_attr *srq_attr,
2122 enum ib_srq_attr_mask srq_attr_mask,
2123 struct ib_udata *udata);
2124 int (*query_srq)(struct ib_srq *srq,
2125 struct ib_srq_attr *srq_attr);
2126 int (*destroy_srq)(struct ib_srq *srq);
2127 int (*post_srq_recv)(struct ib_srq *srq,
2128 struct ib_recv_wr *recv_wr,
2129 struct ib_recv_wr **bad_recv_wr);
2130 struct ib_qp * (*create_qp)(struct ib_pd *pd,
2131 struct ib_qp_init_attr *qp_init_attr,
2132 struct ib_udata *udata);
2133 int (*modify_qp)(struct ib_qp *qp,
2134 struct ib_qp_attr *qp_attr,
2135 int qp_attr_mask,
2136 struct ib_udata *udata);
2137 int (*query_qp)(struct ib_qp *qp,
2138 struct ib_qp_attr *qp_attr,
2139 int qp_attr_mask,
2140 struct ib_qp_init_attr *qp_init_attr);
2141 int (*destroy_qp)(struct ib_qp *qp);
2142 int (*post_send)(struct ib_qp *qp,
2143 struct ib_send_wr *send_wr,
2144 struct ib_send_wr **bad_send_wr);
2145 int (*post_recv)(struct ib_qp *qp,
2146 struct ib_recv_wr *recv_wr,
2147 struct ib_recv_wr **bad_recv_wr);
2148 struct ib_cq * (*create_cq)(struct ib_device *device,
2149 const struct ib_cq_init_attr *attr,
2150 struct ib_ucontext *context,
2151 struct ib_udata *udata);
2152 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2153 u16 cq_period);
2154 int (*destroy_cq)(struct ib_cq *cq);
2155 int (*resize_cq)(struct ib_cq *cq, int cqe,
2156 struct ib_udata *udata);
2157 int (*poll_cq)(struct ib_cq *cq, int num_entries,
2158 struct ib_wc *wc);
2159 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2160 int (*req_notify_cq)(struct ib_cq *cq,
2161 enum ib_cq_notify_flags flags);
2162 int (*req_ncomp_notif)(struct ib_cq *cq,
2163 int wc_cnt);
2164 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
2165 int mr_access_flags);
2166 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
2167 u64 start, u64 length,
2168 u64 virt_addr,
2169 int mr_access_flags,
2170 struct ib_udata *udata);
2171 int (*rereg_user_mr)(struct ib_mr *mr,
2172 int flags,
2173 u64 start, u64 length,
2174 u64 virt_addr,
2175 int mr_access_flags,
2176 struct ib_pd *pd,
2177 struct ib_udata *udata);
2178 int (*dereg_mr)(struct ib_mr *mr);
2179 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
2180 enum ib_mr_type mr_type,
2181 u32 max_num_sg);
2182 int (*map_mr_sg)(struct ib_mr *mr,
2183 struct scatterlist *sg,
2184 int sg_nents,
2185 unsigned int *sg_offset);
2186 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
2187 enum ib_mw_type type,
2188 struct ib_udata *udata);
2189 int (*dealloc_mw)(struct ib_mw *mw);
2190 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
2191 int mr_access_flags,
2192 struct ib_fmr_attr *fmr_attr);
2193 int (*map_phys_fmr)(struct ib_fmr *fmr,
2194 u64 *page_list, int list_len,
2195 u64 iova);
2196 int (*unmap_fmr)(struct list_head *fmr_list);
2197 int (*dealloc_fmr)(struct ib_fmr *fmr);
2198 int (*attach_mcast)(struct ib_qp *qp,
2199 union ib_gid *gid,
2200 u16 lid);
2201 int (*detach_mcast)(struct ib_qp *qp,
2202 union ib_gid *gid,
2203 u16 lid);
2204 int (*process_mad)(struct ib_device *device,
2205 int process_mad_flags,
2206 u8 port_num,
2207 const struct ib_wc *in_wc,
2208 const struct ib_grh *in_grh,
2209 const struct ib_mad_hdr *in_mad,
2210 size_t in_mad_size,
2211 struct ib_mad_hdr *out_mad,
2212 size_t *out_mad_size,
2213 u16 *out_mad_pkey_index);
2214 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
2215 struct ib_ucontext *ucontext,
2216 struct ib_udata *udata);
2217 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2218 struct ib_flow * (*create_flow)(struct ib_qp *qp,
2219 struct ib_flow_attr
2220 *flow_attr,
2221 int domain);
2222 int (*destroy_flow)(struct ib_flow *flow_id);
2223 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2224 struct ib_mr_status *mr_status);
2225 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2226 void (*drain_rq)(struct ib_qp *qp);
2227 void (*drain_sq)(struct ib_qp *qp);
2228 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2229 int state);
2230 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2231 struct ifla_vf_info *ivf);
2232 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2233 struct ifla_vf_stats *stats);
2234 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2235 int type);
2236 struct ib_wq * (*create_wq)(struct ib_pd *pd,
2237 struct ib_wq_init_attr *init_attr,
2238 struct ib_udata *udata);
2239 int (*destroy_wq)(struct ib_wq *wq);
2240 int (*modify_wq)(struct ib_wq *wq,
2241 struct ib_wq_attr *attr,
2242 u32 wq_attr_mask,
2243 struct ib_udata *udata);
2244 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
2245 struct ib_rwq_ind_table_init_attr *init_attr,
2246 struct ib_udata *udata);
2247 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2248 /**
2249 * rdma netdev operation
2250 *
2251 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2252 * doesn't support the specified rdma netdev type.
2253 */
2254 struct net_device *(*alloc_rdma_netdev)(
2255 struct ib_device *device,
2256 u8 port_num,
2257 enum rdma_netdev_t type,
2258 const char *name,
2259 unsigned char name_assign_type,
2260 void (*setup)(struct net_device *));
2261
2262 struct module *owner;
2263 struct device dev;
2264 struct kobject *ports_parent;
2265 struct list_head port_list;
2266
2267 enum {
2268 IB_DEV_UNINITIALIZED,
2269 IB_DEV_REGISTERED,
2270 IB_DEV_UNREGISTERED
2271 } reg_state;
2272
2273 int uverbs_abi_ver;
2274 u64 uverbs_cmd_mask;
2275 u64 uverbs_ex_cmd_mask;
2276
2277 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2278 __be64 node_guid;
2279 u32 local_dma_lkey;
2280 u16 is_switch:1;
2281 u8 node_type;
2282 u8 phys_port_cnt;
2283 struct ib_device_attr attrs;
2284 struct attribute_group *hw_stats_ag;
2285 struct rdma_hw_stats *hw_stats;
2286
2287 #ifdef CONFIG_CGROUP_RDMA
2288 struct rdmacg_device cg_device;
2289 #endif
2290
2291 /**
2292 * The following mandatory functions are used only at device
2293 * registration. Keep functions such as these at the end of this
2294 * structure to avoid cache line misses when accessing struct ib_device
2295 * in fast paths.
2296 */
2297 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2298 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
2299 };
2300
2301 struct ib_client {
2302 char *name;
2303 void (*add) (struct ib_device *);
2304 void (*remove)(struct ib_device *, void *client_data);
2305
2306 /* Returns the net_dev belonging to this ib_client and matching the
2307 * given parameters.
2308 * @dev: An RDMA device that the net_dev uses for communication.
2309 * @port: A physical port number on the RDMA device.
2310 * @pkey: P_Key that the net_dev uses if applicable.
2311 * @gid: A GID that the net_dev uses to communicate.
2312 * @addr: An IP address the net_dev is configured with.
2313 * @client_data: The device's client data set by ib_set_client_data().
2314 *
2315 * An ib_client that implements a net_dev on top of RDMA devices
2316 * (such as IP over IB) should implement this callback, allowing the
2317 * rdma_cm module to find the right net_dev for a given request.
2318 *
2319 * The caller is responsible for calling dev_put on the returned
2320 * netdev. */
2321 struct net_device *(*get_net_dev_by_params)(
2322 struct ib_device *dev,
2323 u8 port,
2324 u16 pkey,
2325 const union ib_gid *gid,
2326 const struct sockaddr *addr,
2327 void *client_data);
2328 struct list_head list;
2329 };
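/*
 * Illustrative sketch (not part of the original header): a minimal
 * ib_client that attaches per-device state via ib_set_client_data()
 * (declared below). The names my_client, my_add, my_remove and
 * struct my_state are hypothetical.
 */
#if 0	/* example only */
static struct ib_client my_client;

struct my_state { int dummy; };

static void my_add(struct ib_device *device)
{
        struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (st)
                ib_set_client_data(device, &my_client, st);
}

static void my_remove(struct ib_device *device, void *client_data)
{
        kfree(client_data);             /* state stored by my_add() */
}

static struct ib_client my_client = {
        .name   = "my_client",
        .add    = my_add,
        .remove = my_remove,
};

/* Typically from module_init(): ib_register_client(&my_client); */
#endif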
2330
2331 struct ib_device *ib_alloc_device(size_t size);
2332 void ib_dealloc_device(struct ib_device *device);
2333
2334 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
2335
2336 int ib_register_device(struct ib_device *device,
2337 int (*port_callback)(struct ib_device *,
2338 u8, struct kobject *));
2339 void ib_unregister_device(struct ib_device *device);
2340
2341 int ib_register_client(struct ib_client *client);
2342 void ib_unregister_client(struct ib_client *client);
2343
2344 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2345 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2346 void *data);
2347
2348 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2349 {
2350 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2351 }
2352
2353 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2354 {
2355 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2356 }
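/*
 * Usage sketch (assumption, not from this header): a driver verb that
 * reads a user request and writes a response back through the udata
 * helpers above. struct my_req, struct my_resp and my_create_foo are
 * hypothetical names.
 */
#if 0	/* example only */
struct my_req  { __u32 flags; };
struct my_resp { __u32 handle; };

static int my_create_foo(struct ib_udata *udata)
{
        struct my_resp resp = { .handle = 42 };
        struct my_req req;
        int ret;

        ret = ib_copy_from_udata(&req, udata, sizeof(req));
        if (ret)
                return ret;             /* -EFAULT on a bad user buffer */

        return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
#endif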
2357
2358 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2359 size_t offset,
2360 size_t len)
2361 {
2362 const void __user *p = udata->inbuf + offset;
2363 bool ret;
2364 u8 *buf;
2365
2366 if (len > USHRT_MAX)
2367 return false;
2368
2369 buf = memdup_user(p, len);
2370 if (IS_ERR(buf))
2371 return false;
2372
2373 ret = !memchr_inv(buf, 0, len);
2374 kfree(buf);
2375 return ret;
2376 }
2377
2378 /**
2379 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2380 * contains all required attributes and no attributes not allowed for
2381 * the given QP state transition.
2382 * @cur_state: Current QP state
2383 * @next_state: Next QP state
2384 * @type: QP type
2385 * @mask: Mask of supplied QP attributes
2386 * @ll : link layer of port
2387 *
2388 * This function is a helper function that a low-level driver's
2389 * modify_qp method can use to validate the consumer's input. It
2390 * checks that cur_state and next_state are valid QP states, that a
2391 * transition from cur_state to next_state is allowed by the IB spec,
2392 * and that the attribute mask supplied is allowed for the transition.
2393 */
2394 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2395 enum ib_qp_type type, enum ib_qp_attr_mask mask,
2396 enum rdma_link_layer ll);
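/*
 * Sketch of the intended use (hypothetical my_modify_qp; a real driver
 * tracks the current QP state itself rather than trusting the caller):
 * validate the transition before programming the hardware.
 */
#if 0	/* example only */
static int my_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                        int attr_mask, struct ib_udata *udata)
{
        enum ib_qp_state cur = (attr_mask & IB_QP_CUR_STATE) ?
                attr->cur_qp_state : IB_QPS_RESET;
        enum ib_qp_state next = (attr_mask & IB_QP_STATE) ?
                attr->qp_state : cur;
        enum rdma_link_layer ll =
                rdma_port_get_link_layer(qp->device, attr->port_num);

        if (!ib_modify_qp_is_ok(cur, next, qp->qp_type, attr_mask, ll))
                return -EINVAL;

        /* ...apply the attributes to the hardware... */
        return 0;
}
#endif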
2397
2398 int ib_register_event_handler(struct ib_event_handler *event_handler);
2399 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
2400 void ib_dispatch_event(struct ib_event *event);
2401
2402 int ib_query_port(struct ib_device *device,
2403 u8 port_num, struct ib_port_attr *port_attr);
2404
2405 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2406 u8 port_num);
2407
2408 /**
2409 * rdma_cap_ib_switch - Check if the device is an IB switch
2410 * @device: Device to check
2411 *
2412 * The device driver is responsible for setting the is_switch bit in
2413 * the ib_device structure at init time.
2414 *
2415 * Return: true if the device is an IB switch.
2416 */
2417 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2418 {
2419 return device->is_switch;
2420 }
2421
2422 /**
2423 * rdma_start_port - Return the first valid port number for the device
2424 * specified
2425 *
2426 * @device: Device to be checked
2427 *
2428 * Return: the start port number
2429 */
2430 static inline u8 rdma_start_port(const struct ib_device *device)
2431 {
2432 return rdma_cap_ib_switch(device) ? 0 : 1;
2433 }
2434
2435 /**
2436 * rdma_end_port - Return the last valid port number for the device
2437 * specified
2438 *
2439 * @device: Device to be checked
2440 *
2441 * Return: the last port number
2442 */
2443 static inline u8 rdma_end_port(const struct ib_device *device)
2444 {
2445 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2446 }
2447
2448 static inline int rdma_is_port_valid(const struct ib_device *device,
2449 unsigned int port)
2450 {
2451 return (port >= rdma_start_port(device) &&
2452 port <= rdma_end_port(device));
2453 }
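/*
 * Example (sketch): an IB switch exposes the single port 0 while HCAs
 * number ports from 1, so consumers should iterate with the helpers
 * above instead of assuming a 1-based range. my_scan_ports is a
 * hypothetical name.
 */
#if 0	/* example only */
static void my_scan_ports(struct ib_device *device)
{
        u8 port;

        for (port = rdma_start_port(device);
             port <= rdma_end_port(device); port++) {
                /* per-port work, e.g. ib_query_port(device, port, ...) */
        }
}
#endif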
2454
2455 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2456 {
2457 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2458 }
2459
2460 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2461 {
2462 return device->port_immutable[port_num].core_cap_flags &
2463 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2464 }
2465
2466 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2467 {
2468 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2469 }
2470
2471 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2472 {
2473 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2474 }
2475
2476 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2477 {
2478 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2479 }
2480
2481 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2482 {
2483 return rdma_protocol_ib(device, port_num) ||
2484 rdma_protocol_roce(device, port_num);
2485 }
2486
2487 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2488 {
2489 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2490 }
2491
2492 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2493 {
2494 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2495 }
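/*
 * Example (sketch): the rdma_protocol_*() predicates above let
 * transport-aware code branch per port, since address resolution and
 * connection setup differ between IB, RoCE and iWARP. my_classify is a
 * hypothetical name.
 */
#if 0	/* example only */
static const char *my_classify(struct ib_device *device, u8 port_num)
{
        if (rdma_protocol_ib(device, port_num))
                return "ib";            /* paths resolved through the SA */
        if (rdma_protocol_roce(device, port_num))
                return "roce";          /* GIDs derived from IP/MAC */
        if (rdma_protocol_iwarp(device, port_num))
                return "iwarp";         /* plain IP routing, iw_cm */
        return "other";
}
#endif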
2496
2497 /**
2498 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2499 * Management Datagrams.
2500 * @device: Device to check
2501 * @port_num: Port number to check
2502 *
2503 * Management Datagrams (MAD) are a required part of the InfiniBand
2504 * specification and are supported on all InfiniBand devices. A slightly
2505 * extended version is also supported on OPA interfaces.
2506 *
2507 * Return: true if the port supports sending/receiving of MAD packets.
2508 */
2509 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2510 {
2511 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2512 }
2513
2514 /**
2515 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2516 * Management Datagrams.
2517 * @device: Device to check
2518 * @port_num: Port number to check
2519 *
2520 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2521 * datagrams with their own versions. These OPA MADs share many but not all of
2522 * the characteristics of InfiniBand MADs.
2523 *
2524 * OPA MADs differ in the following ways:
2525 *
2526 * 1) MADs are variable size up to 2K
2527 * IBTA defined MADs remain fixed at 256 bytes
2528 * 2) OPA SMPs must carry valid PKeys
2529 * 3) OPA SMP packets are a different format
2530 *
2531 * Return: true if the port supports OPA MAD packet formats.
2532 */
2533 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2534 {
2535 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2536 == RDMA_CORE_CAP_OPA_MAD;
2537 }
2538
2539 /**
2540 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2541 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2542 * @device: Device to check
2543 * @port_num: Port number to check
2544 *
2545 * Each InfiniBand node is required to provide a Subnet Management Agent
2546 * that the subnet manager can access. Prior to the fabric being fully
2547 * configured by the subnet manager, the SMA is accessed via a well known
2548 * interface called the Subnet Management Interface (SMI). This interface
2549 * uses directed route packets to communicate with the SM to get around the
2550 * chicken and egg problem of the SM needing to know what's on the fabric
2551 * in order to configure the fabric, and needing to configure the fabric in
2552 * order to send packets to the devices on the fabric. These directed
2553 * route packets do not need the fabric fully configured in order to reach
2554 * their destination. The SMI is the only method allowed to send
2555 * directed route packets on an InfiniBand fabric.
2556 *
2557 * Return: true if the port provides an SMI.
2558 */
2559 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2560 {
2561 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2562 }
2563
2564 /**
2565 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2566 * Communication Manager.
2567 * @device: Device to check
2568 * @port_num: Port number to check
2569 *
2570 * The InfiniBand Communication Manager is one of many pre-defined General
2571 * Service Agents (GSA) that are accessed via the General Service
2572 * Interface (GSI). Its role is to facilitate the establishment of connections
2573 * between nodes as well as other management related tasks for established
2574 * connections.
2575 *
2576 * Return: true if the port supports an IB CM (this does not guarantee
2577 * that a CM is actually running, however).
2578 */
2579 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2580 {
2581 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2582 }
2583
2584 /**
2585 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2586 * Communication Manager.
2587 * @device: Device to check
2588 * @port_num: Port number to check
2589 *
2590 * Similar to above, but specific to iWARP connections which have a different
2591 * management protocol than InfiniBand.
2592 *
2593 * Return: true if the port supports an iWARP CM (this does not guarantee
2594 * that a CM is actually running, however).
2595 */
2596 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2597 {
2598 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2599 }
2600
2601 /**
2602 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2603 * Subnet Administration.
2604 * @device: Device to check
2605 * @port_num: Port number to check
2606 *
2607 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2608 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
2609 * fabrics, devices should resolve routes to other hosts by contacting the
2610 * SA to query the proper route.
2611 *
2612 * Return: true if the port should act as a client to the fabric Subnet
2613 * Administration interface. This does not imply that the SA service is
2614 * running locally.
2615 */
2616 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2617 {
2618 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2619 }
2620
2621 /**
2622 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2623 * Multicast.
2624 * @device: Device to check
2625 * @port_num: Port number to check
2626 *
2627 * InfiniBand multicast registration is more complex than normal IPv4 or
2628 * IPv6 multicast registration. Each Host Channel Adapter must register
2629 * with the Subnet Manager when it wishes to join a multicast group. It
2630 * should do so only once regardless of how many queue pairs it subscribes
2631 * to this group. And it should leave the group only after all queue pairs
2632 * attached to the group have been detached.
2633 *
2634 * Return: true if the port must undertake the additional administrative
2635 * overhead of registering/unregistering with the SM and tracking of the
2636 * total number of queue pairs attached to the multicast group.
2637 */
2638 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2639 {
2640 return rdma_cap_ib_sa(device, port_num);
2641 }
2642
2643 /**
2644 * rdma_cap_af_ib - Check if the port of a device supports
2645 * native InfiniBand addressing.
2646 * @device: Device to check
2647 * @port_num: Port number to check
2648 *
2649 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2650 * GID. RoCE uses a different mechanism, but still generates a GID
2651 * in a prescribed way from port-specific data.
2652 *
2653 * Return: true if the port uses a GID address to identify devices on the
2654 * network.
2655 */
2656 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2657 {
2658 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2659 }
2660
2661 /**
2662 * rdma_cap_eth_ah - Check if the port of a device supports
2663 * Ethernet Address Handles.
2664 * @device: Device to check
2665 * @port_num: Port number to check
2666 *
2667 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2668 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2669 * port. Normally, packet headers are generated by the sending host
2670 * adapter, but when sending connectionless datagrams, we must manually
2671 * inject the proper headers for the fabric we are communicating over.
2672 *
2673 * Return: true if we are running as a RoCE port and must force the
2674 * addition of a Global Route Header built from our Ethernet Address
2675 * Handle into our header list for connectionless packets.
2676 */
2677 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2678 {
2679 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2680 }
2681
2682 /**
2683 * rdma_cap_opa_ah - Check if the port of a device supports
2684 * OPA Address Handles.
2685 * @device: Device to check
2686 * @port_num: Port number to check
2687 *
2688 * Return: true if we are running on an OPA device which supports
2689 * the extended OPA addressing.
2690 */
2691 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2692 {
2693 return (device->port_immutable[port_num].core_cap_flags &
2694 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2695 }
2696
2697 /**
2698 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2699 *
2700 * @device: Device
2701 * @port_num: Port number
2702 *
2703 * This MAD size includes the MAD headers and MAD payload. No other headers
2704 * are included.
2705 *
2706 * Return the max MAD size required by the port. Will return 0 if the port
2707 * does not support MADs.
2708 */
2709 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2710 {
2711 return device->port_immutable[port_num].max_mad_size;
2712 }
2713
2714 /**
2715 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
2716 * @device: Device to check
2717 * @port_num: Port number to check
2718 *
2719 * The RoCE GID table mechanism manages the various GIDs for a device.
2720 *
2721 * NOTE: if allocating the port's GID table has failed, this call will still
2722 * return true, but any RoCE GID table API will fail.
2723 *
2724 * Return: true if the port uses RoCE GID table mechanism in order to manage
2725 * its GIDs.
2726 */
2727 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2728 u8 port_num)
2729 {
2730 return rdma_protocol_roce(device, port_num) &&
2731 device->add_gid && device->del_gid;
2732 }
2733
2734 /*
2735 * Check if the device supports READ W/ INVALIDATE.
2736 */
2737 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2738 {
2739 /*
2740 * iWARP drivers must support READ W/ INVALIDATE. No other protocol
2741 * has support for it yet.
2742 */
2743 return rdma_protocol_iwarp(dev, port_num);
2744 }
2745
2746 int ib_query_gid(struct ib_device *device,
2747 u8 port_num, int index, union ib_gid *gid,
2748 struct ib_gid_attr *attr);
2749
2750 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2751 int state);
2752 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2753 struct ifla_vf_info *info);
2754 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2755 struct ifla_vf_stats *stats);
2756 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2757 int type);
2758
2759 int ib_query_pkey(struct ib_device *device,
2760 u8 port_num, u16 index, u16 *pkey);
2761
2762 int ib_modify_device(struct ib_device *device,
2763 int device_modify_mask,
2764 struct ib_device_modify *device_modify);
2765
2766 int ib_modify_port(struct ib_device *device,
2767 u8 port_num, int port_modify_mask,
2768 struct ib_port_modify *port_modify);
2769
2770 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2771 enum ib_gid_type gid_type, struct net_device *ndev,
2772 u8 *port_num, u16 *index);
2773
2774 int ib_find_pkey(struct ib_device *device,
2775 u8 port_num, u16 pkey, u16 *index);
2776
2777 enum ib_pd_flags {
2778 /*
2779 * Create a memory registration for all memory in the system and place
2780 * the rkey for it into pd->unsafe_global_rkey. This can be used by
2781 * ULPs to avoid the overhead of dynamic MRs.
2782 *
2783 * This flag is generally considered unsafe and must only be used in
2784 * extremely trusted environments. Every use of it will log a warning
2785 * in the kernel log.
2786 */
2787 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
2788 };
2789
2790 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2791 const char *caller);
2792 #define ib_alloc_pd(device, flags) \
2793 __ib_alloc_pd((device), (flags), __func__)
2794 void ib_dealloc_pd(struct ib_pd *pd);
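/*
 * Example (sketch): a typical ULP allocates one PD per device and frees
 * it on teardown. flags is normally 0; IB_PD_UNSAFE_GLOBAL_RKEY is for
 * fully trusted environments only, as noted above. my_setup_pd is a
 * hypothetical name.
 */
#if 0	/* example only */
static int my_setup_pd(struct ib_device *device)
{
        struct ib_pd *pd = ib_alloc_pd(device, 0);

        if (IS_ERR(pd))
                return PTR_ERR(pd);
        /* ...create QPs, CQs and MRs under this PD... */
        ib_dealloc_pd(pd);
        return 0;
}
#endif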
2795
2796 /**
2797 * rdma_create_ah - Creates an address handle for the given address vector.
2798 * @pd: The protection domain associated with the address handle.
2799 * @ah_attr: The attributes of the address vector.
2800 *
2801 * The address handle is used to reference a local or global destination
2802 * in all UD QP post sends.
2803 */
2804 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2805
2806 /**
2807 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
2808 * work completion.
2809 * @hdr: the L3 header to parse
2810 * @net_type: type of header to parse
2811 * @sgid: place to store source gid
2812 * @dgid: place to store destination gid
2813 */
2814 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2815 enum rdma_network_type net_type,
2816 union ib_gid *sgid, union ib_gid *dgid);
2817
2818 /**
2819 * ib_get_rdma_header_version - Get the header version
2820 * @hdr: the L3 header to parse
2821 */
2822 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2823
2824 /**
2825 * ib_init_ah_from_wc - Initializes address handle attributes from a
2826 * work completion.
2827 * @device: Device on which the received message arrived.
2828 * @port_num: Port on which the received message arrived.
2829 * @wc: Work completion associated with the received message.
2830 * @grh: References the received global route header. This parameter is
2831 * ignored unless the work completion indicates that the GRH is valid.
2832 * @ah_attr: Returned attributes that can be used when creating an address
2833 * handle for replying to the message.
2834 */
2835 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2836 const struct ib_wc *wc, const struct ib_grh *grh,
2837 struct rdma_ah_attr *ah_attr);
2838
2839 /**
2840 * ib_create_ah_from_wc - Creates an address handle associated with the
2841 * sender of the specified work completion.
2842 * @pd: The protection domain associated with the address handle.
2843 * @wc: Work completion information associated with a received message.
2844 * @grh: References the received global route header. This parameter is
2845 * ignored unless the work completion indicates that the GRH is valid.
2846 * @port_num: The outbound port number to associate with the address.
2847 *
2848 * The address handle is used to reference a local or global destination
2849 * in all UD QP post sends.
2850 */
2851 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2852 const struct ib_grh *grh, u8 port_num);
2853
2854 /**
2855 * rdma_modify_ah - Modifies the address vector associated with an address
2856 * handle.
2857 * @ah: The address handle to modify.
2858 * @ah_attr: The new address vector attributes to associate with the
2859 * address handle.
2860 */
2861 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2862
2863 /**
2864 * rdma_query_ah - Queries the address vector associated with an address
2865 * handle.
2866 * @ah: The address handle to query.
2867 * @ah_attr: The address vector attributes associated with the address
2868 * handle.
2869 */
2870 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2871
2872 /**
2873 * rdma_destroy_ah - Destroys an address handle.
2874 * @ah: The address handle to destroy.
2875 */
2876 int rdma_destroy_ah(struct ib_ah *ah);
2877
2878 /**
2879 * ib_create_srq - Creates a SRQ associated with the specified protection
2880 * domain.
2881 * @pd: The protection domain associated with the SRQ.
2882 * @srq_init_attr: A list of initial attributes required to create the
2883 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2884 * the actual capabilities of the created SRQ.
2885 *
2886 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2887 * requested size of the SRQ, and set to the actual values allocated
2888 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
2889 * will always be at least as large as the requested values.
2890 */
2891 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2892 struct ib_srq_init_attr *srq_init_attr);
2893
2894 /**
2895 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2896 * @srq: The SRQ to modify.
2897 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
2898 * the current values of selected SRQ attributes are returned.
2899 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2900 * are being modified.
2901 *
2902 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2903 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2904 * the number of receives queued drops below the limit.
2905 */
2906 int ib_modify_srq(struct ib_srq *srq,
2907 struct ib_srq_attr *srq_attr,
2908 enum ib_srq_attr_mask srq_attr_mask);
2909
2910 /**
2911 * ib_query_srq - Returns the attribute list and current values for the
2912 * specified SRQ.
2913 * @srq: The SRQ to query.
2914 * @srq_attr: The attributes of the specified SRQ.
2915 */
2916 int ib_query_srq(struct ib_srq *srq,
2917 struct ib_srq_attr *srq_attr);
2918
2919 /**
2920 * ib_destroy_srq - Destroys the specified SRQ.
2921 * @srq: The SRQ to destroy.
2922 */
2923 int ib_destroy_srq(struct ib_srq *srq);
2924
2925 /**
2926 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2927 * @srq: The SRQ to post the work request on.
2928 * @recv_wr: A list of work requests to post on the receive queue.
2929 * @bad_recv_wr: On an immediate failure, this parameter will reference
2930 * the work request that failed to be posted on the SRQ.
2931 */
2932 static inline int ib_post_srq_recv(struct ib_srq *srq,
2933 struct ib_recv_wr *recv_wr,
2934 struct ib_recv_wr **bad_recv_wr)
2935 {
2936 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2937 }
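/*
 * Example (sketch): posting one receive buffer to an SRQ. The SGE must
 * reference a DMA-mapped buffer with a valid lkey; my_post_one is a
 * hypothetical name and error handling is elided.
 */
#if 0	/* example only */
static int my_post_one(struct ib_srq *srq, u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = dma_addr,    /* cookie returned in the completion */
                .sg_list = &sge,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad;

        return ib_post_srq_recv(srq, &wr, &bad);
}
#endif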
2938
2939 /**
2940 * ib_create_qp - Creates a QP associated with the specified protection
2941 * domain.
2942 * @pd: The protection domain associated with the QP.
2943 * @qp_init_attr: A list of initial attributes required to create the
2944 * QP. If QP creation succeeds, then the attributes are updated to
2945 * the actual capabilities of the created QP.
2946 */
2947 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2948 struct ib_qp_init_attr *qp_init_attr);
2949
2950 /**
2951 * ib_modify_qp - Modifies the attributes for the specified QP and then
2952 * transitions the QP to the given state.
2953 * @qp: The QP to modify.
2954 * @qp_attr: On input, specifies the QP attributes to modify. On output,
2955 * the current values of selected QP attributes are returned.
2956 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2957 * are being modified.
2958 */
2959 int ib_modify_qp(struct ib_qp *qp,
2960 struct ib_qp_attr *qp_attr,
2961 int qp_attr_mask);
2962
2963 /**
2964 * ib_query_qp - Returns the attribute list and current values for the
2965 * specified QP.
2966 * @qp: The QP to query.
2967 * @qp_attr: The attributes of the specified QP.
2968 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2969 * @qp_init_attr: Additional attributes of the selected QP.
2970 *
2971 * The qp_attr_mask may be used to limit the query to gathering only the
2972 * selected attributes.
2973 */
2974 int ib_query_qp(struct ib_qp *qp,
2975 struct ib_qp_attr *qp_attr,
2976 int qp_attr_mask,
2977 struct ib_qp_init_attr *qp_init_attr);
2978
2979 /**
2980 * ib_destroy_qp - Destroys the specified QP.
2981 * @qp: The QP to destroy.
2982 */
2983 int ib_destroy_qp(struct ib_qp *qp);
2984
2985 /**
2986 * ib_open_qp - Obtain a reference to an existing sharable QP.
2987 * @xrcd: XRC domain
2988 * @qp_open_attr: Attributes identifying the QP to open.
2989 *
2990 * Returns a reference to a sharable QP.
2991 */
2992 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2993 struct ib_qp_open_attr *qp_open_attr);
2994
2995 /**
2996 * ib_close_qp - Release an external reference to a QP.
2997 * @qp: The QP handle to release
2998 *
2999 * The opened QP handle is released by the caller. The underlying
3000 * shared QP is not destroyed until all internal references are released.
3001 */
3002 int ib_close_qp(struct ib_qp *qp);
3003
3004 /**
3005 * ib_post_send - Posts a list of work requests to the send queue of
3006 * the specified QP.
3007 * @qp: The QP to post the work request on.
3008 * @send_wr: A list of work requests to post on the send queue.
3009 * @bad_send_wr: On an immediate failure, this parameter will reference
3010 * the work request that failed to be posted on the QP.
3011 *
3012 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3013 * error is returned, the QP state shall not be affected,
3014 * ib_post_send() will return an immediate error after queueing any
3015 * earlier work requests in the list.
3016 */
3017 static inline int ib_post_send(struct ib_qp *qp,
3018 struct ib_send_wr *send_wr,
3019 struct ib_send_wr **bad_send_wr)
3020 {
3021 return qp->device->post_send(qp, send_wr, bad_send_wr);
3022 }
3023
3024 /**
3025 * ib_post_recv - Posts a list of work requests to the receive queue of
3026 * the specified QP.
3027 * @qp: The QP to post the work request on.
3028 * @recv_wr: A list of work requests to post on the receive queue.
3029 * @bad_recv_wr: On an immediate failure, this parameter will reference
3030 * the work request that failed to be posted on the QP.
3031 */
3032 static inline int ib_post_recv(struct ib_qp *qp,
3033 struct ib_recv_wr *recv_wr,
3034 struct ib_recv_wr **bad_recv_wr)
3035 {
3036 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3037 }
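/*
 * Example (sketch): posting a signaled two-sided send. IB_WR_SEND and
 * IB_SEND_SIGNALED come from the work-request definitions earlier in
 * this file; my_send_one is a hypothetical name.
 */
#if 0	/* example only */
static int my_send_one(struct ib_qp *qp, struct ib_sge *sge)
{
        struct ib_send_wr wr = {
                .wr_id      = 1,
                .sg_list    = sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* generate a completion */
        };
        struct ib_send_wr *bad;

        return ib_post_send(qp, &wr, &bad);
}
#endif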
3038
3039 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3040 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
3041 void ib_free_cq(struct ib_cq *cq);
3042 int ib_process_cq_direct(struct ib_cq *cq, int budget);
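/*
 * Sketch (assumption): modern kernel ULPs typically use ib_alloc_cq()
 * above with a polling context rather than raw ib_create_cq(); each WR
 * carries a struct ib_cqe whose .done callback runs in that context.
 * my_done and my_setup_cq are hypothetical names.
 */
#if 0	/* example only */
static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
        /* container_of(wc->wr_cqe, ...) recovers the original request */
}

static int my_setup_cq(struct ib_device *device)
{
        struct ib_cq *cq = ib_alloc_cq(device, NULL, 128, 0,
                                       IB_POLL_SOFTIRQ);

        if (IS_ERR(cq))
                return PTR_ERR(cq);
        /* ...post WRs whose wr_cqe->done == my_done... */
        ib_free_cq(cq);
        return 0;
}
#endif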
3043
3044 /**
3045 * ib_create_cq - Creates a CQ on the specified device.
3046 * @device: The device on which to create the CQ.
3047 * @comp_handler: A user-specified callback that is invoked when a
3048 * completion event occurs on the CQ.
3049 * @event_handler: A user-specified callback that is invoked when an
3050 * asynchronous event not associated with a completion occurs on the CQ.
3051 * @cq_context: Context associated with the CQ returned to the user via
3052 * the associated completion and event handlers.
3053 * @cq_attr: The attributes with which the CQ should be created.
3054 *
3055 * Users can examine the cq structure to determine the actual CQ size.
3056 */
3057 struct ib_cq *ib_create_cq(struct ib_device *device,
3058 ib_comp_handler comp_handler,
3059 void (*event_handler)(struct ib_event *, void *),
3060 void *cq_context,
3061 const struct ib_cq_init_attr *cq_attr);
3062
3063 /**
3064 * ib_resize_cq - Modifies the capacity of the CQ.
3065 * @cq: The CQ to resize.
3066 * @cqe: The minimum size of the CQ.
3067 *
3068 * Users can examine the cq structure to determine the actual CQ size.
3069 */
3070 int ib_resize_cq(struct ib_cq *cq, int cqe);
3071
3072 /**
3073 * ib_modify_cq - Modifies moderation params of the CQ
3074 * @cq: The CQ to modify.
3075 * @cq_count: number of CQEs that will trigger an event
3076 * @cq_period: max period of time in usec before triggering an event
3077 *
3078 */
3079 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3080
3081 /**
3082 * ib_destroy_cq - Destroys the specified CQ.
3083 * @cq: The CQ to destroy.
3084 */
3085 int ib_destroy_cq(struct ib_cq *cq);
3086
3087 /**
3088 * ib_poll_cq - poll a CQ for completion(s)
3089 * @cq: the CQ being polled
3090 * @num_entries: maximum number of completions to return
3091 * @wc: array of at least @num_entries &struct ib_wc where completions
3092 * will be returned
3093 *
3094 * Poll a CQ for (possibly multiple) completions. If the return value
3095 * is < 0, an error occurred. If the return value is >= 0, it is the
3096 * number of completions returned. If the return value is
3097 * non-negative and < num_entries, then the CQ was emptied.
3098 */
3099 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3100 struct ib_wc *wc)
3101 {
3102 return cq->device->poll_cq(cq, num_entries, wc);
3103 }
3104
3105 /**
3106 * ib_peek_cq - Returns the number of unreaped completions currently
3107 * on the specified CQ.
3108 * @cq: The CQ to peek.
3109 * @wc_cnt: A minimum number of unreaped completions to check for.
3110 *
3111 * If the number of unreaped completions is greater than or equal to wc_cnt,
3112 * this function returns wc_cnt; otherwise, it returns the actual number of
3113 * unreaped completions.
3114 */
3115 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3116
3117 /**
3118 * ib_req_notify_cq - Request completion notification on a CQ.
3119 * @cq: The CQ to generate an event for.
3120 * @flags:
3121 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3122 * to request an event on the next solicited event or next work
3123 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3124 * may also be |ed in to request a hint about missed events, as
3125 * described below.
3126 *
3127 * Return Value:
3128 * < 0 means an error occurred while requesting notification
3129 * == 0 means notification was requested successfully, and if
3130 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3131 * were missed and it is safe to wait for another event. In
3132 * this case it is guaranteed that any work completions added
3133 * to the CQ since the last CQ poll will trigger a completion
3134 * notification event.
3135 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3136 * in. It means that the consumer must poll the CQ again to
3137 * make sure it is empty to avoid missing an event because of a
3138 * race between requesting notification and an entry being
3139 * added to the CQ. This return value means it is possible
3140 * (but not guaranteed) that a work completion has been added
3141 * to the CQ since the last poll without triggering a
3142 * completion notification event.
3143 */
3144 static inline int ib_req_notify_cq(struct ib_cq *cq,
3145 enum ib_cq_notify_flags flags)
3146 {
3147 return cq->device->req_notify_cq(cq, flags);
3148 }
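/*
 * Example (sketch): the canonical drain-and-rearm loop implied by the
 * return-value contract above; polling resumes whenever
 * IB_CQ_REPORT_MISSED_EVENTS signals a possible race. my_drain_cq is a
 * hypothetical name.
 */
#if 0	/* example only */
static void my_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0) {
                        /* ...handle one completion... */
                }
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif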
3149
3150 /**
3151 * ib_req_ncomp_notif - Request completion notification when there are
3152 * at least the specified number of unreaped completions on the CQ.
3153 * @cq: The CQ to generate an event for.
3154 * @wc_cnt: The number of unreaped completions that should be on the
3155 * CQ before an event is generated.
3156 */
3157 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3158 {
3159 return cq->device->req_ncomp_notif ?
3160 cq->device->req_ncomp_notif(cq, wc_cnt) :
3161 -ENOSYS;
3162 }
3163
3164 /**
3165 * ib_dma_mapping_error - check a DMA addr for error
3166 * @dev: The device for which the dma_addr was created
3167 * @dma_addr: The DMA address to check
3168 */
3169 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3170 {
3171 return dma_mapping_error(dev->dma_device, dma_addr);
3172 }
3173
3174 /**
3175 * ib_dma_map_single - Map a kernel virtual address to DMA address
3176 * @dev: The device for which the dma_addr is to be created
3177 * @cpu_addr: The kernel virtual address
3178 * @size: The size of the region in bytes
3179 * @direction: The direction of the DMA
3180 */
3181 static inline u64 ib_dma_map_single(struct ib_device *dev,
3182 void *cpu_addr, size_t size,
3183 enum dma_data_direction direction)
3184 {
3185 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3186 }
3187
3188 /**
3189 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3190 * @dev: The device for which the DMA address was created
3191 * @addr: The DMA address
3192 * @size: The size of the region in bytes
3193 * @direction: The direction of the DMA
3194 */
3195 static inline void ib_dma_unmap_single(struct ib_device *dev,
3196 u64 addr, size_t size,
3197 enum dma_data_direction direction)
3198 {
3199 dma_unmap_single(dev->dma_device, addr, size, direction);
3200 }
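/*
 * Example (sketch): map a kernel buffer for device access and always
 * check the result with ib_dma_mapping_error() before use. my_map_buf
 * is a hypothetical name.
 */
#if 0	/* example only */
static int my_map_buf(struct ib_device *dev, void *buf, size_t len,
                      u64 *dma_addr)
{
        *dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, *dma_addr))
                return -ENOMEM;
        /* ...post WRs referencing *dma_addr, wait for completion, then: */
        ib_dma_unmap_single(dev, *dma_addr, len, DMA_TO_DEVICE);
        return 0;
}
#endif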
3201
3202 /**
3203 * ib_dma_map_page - Map a physical page to DMA address
3204 * @dev: The device for which the dma_addr is to be created
3205 * @page: The page to be mapped
3206 * @offset: The offset within the page
3207 * @size: The size of the region in bytes
3208 * @direction: The direction of the DMA
3209 */
3210 static inline u64 ib_dma_map_page(struct ib_device *dev,
3211 struct page *page,
3212 unsigned long offset,
3213 size_t size,
3214 enum dma_data_direction direction)
3215 {
3216 return dma_map_page(dev->dma_device, page, offset, size, direction);
3217 }
3218
3219 /**
3220 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3221 * @dev: The device for which the DMA address was created
3222 * @addr: The DMA address
3223 * @size: The size of the region in bytes
3224 * @direction: The direction of the DMA
3225 */
3226 static inline void ib_dma_unmap_page(struct ib_device *dev,
3227 u64 addr, size_t size,
3228 enum dma_data_direction direction)
3229 {
3230 dma_unmap_page(dev->dma_device, addr, size, direction);
3231 }
3232
3233 /**
3234 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3235 * @dev: The device for which the DMA addresses are to be created
3236 * @sg: The array of scatter/gather entries
3237 * @nents: The number of scatter/gather entries
3238 * @direction: The direction of the DMA
3239 */
3240 static inline int ib_dma_map_sg(struct ib_device *dev,
3241 struct scatterlist *sg, int nents,
3242 enum dma_data_direction direction)
3243 {
3244 return dma_map_sg(dev->dma_device, sg, nents, direction);
3245 }
3246
3247 /**
3248 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3249 * @dev: The device for which the DMA addresses were created
3250 * @sg: The array of scatter/gather entries
3251 * @nents: The number of scatter/gather entries
3252 * @direction: The direction of the DMA
3253 */
3254 static inline void ib_dma_unmap_sg(struct ib_device *dev,
3255 struct scatterlist *sg, int nents,
3256 enum dma_data_direction direction)
3257 {
3258 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3259 }
3260
3261 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3262 struct scatterlist *sg, int nents,
3263 enum dma_data_direction direction,
3264 unsigned long dma_attrs)
3265 {
3266 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3267 dma_attrs);
3268 }
3269
3270 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3271 struct scatterlist *sg, int nents,
3272 enum dma_data_direction direction,
3273 unsigned long dma_attrs)
3274 {
3275 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3276 }
3277 /**
3278 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3279 * @dev: The device for which the DMA addresses were created
3280 * @sg: The scatter/gather entry
3281 *
3282 * Note: this function is obsolete. To do: change all occurrences of
3283 * ib_sg_dma_address() into sg_dma_address().
3284 */
3285 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3286 struct scatterlist *sg)
3287 {
3288 return sg_dma_address(sg);
3289 }
3290
3291 /**
3292 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3293 * @dev: The device for which the DMA addresses were created
3294 * @sg: The scatter/gather entry
3295 *
3296 * Note: this function is obsolete. To do: change all occurrences of
3297 * ib_sg_dma_len() into sg_dma_len().
3298 */
3299 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3300 struct scatterlist *sg)
3301 {
3302 return sg_dma_len(sg);
3303 }
3304
3305 /**
3306 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3307 * @dev: The device for which the DMA address was created
3308 * @addr: The DMA address
3309 * @size: The size of the region in bytes
3310 * @dir: The direction of the DMA
3311 */
3312 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3313 u64 addr,
3314 size_t size,
3315 enum dma_data_direction dir)
3316 {
3317 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3318 }
3319
3320 /**
3321 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3322 * @dev: The device for which the DMA address was created
3323 * @addr: The DMA address
3324 * @size: The size of the region in bytes
3325 * @dir: The direction of the DMA
3326 */
3327 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3328 u64 addr,
3329 size_t size,
3330 enum dma_data_direction dir)
3331 {
3332 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3333 }
3334
3335 /**
3336 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3337 * @dev: The device for which the DMA address is requested
3338 * @size: The size of the region to allocate in bytes
3339 * @dma_handle: A pointer for returning the DMA address of the region
3340 * @flag: memory allocator flags
3341 */
3342 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3343 size_t size,
3344 dma_addr_t *dma_handle,
3345 gfp_t flag)
3346 {
3347 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3348 }
3349
3350 /**
3351 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3352 * @dev: The device for which the DMA addresses were allocated
3353 * @size: The size of the region
3354 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3355 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3356 */
3357 static inline void ib_dma_free_coherent(struct ib_device *dev,
3358 size_t size, void *cpu_addr,
3359 dma_addr_t dma_handle)
3360 {
3361 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3362 }
3363
3364 /**
3365 * ib_dereg_mr - Deregisters a memory region and removes it from the
3366 * HCA translation table.
3367 * @mr: The memory region to deregister.
3368 *
3369 * This function can fail, if the memory region has memory windows bound to it.
3370 */
3371 int ib_dereg_mr(struct ib_mr *mr);
3372
3373 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3374 enum ib_mr_type mr_type,
3375 u32 max_num_sg);
3376
3377 /**
3378 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3379 * R_Key and L_Key.
3380 * @mr: struct ib_mr pointer to be updated.
3381 * @newkey: new key to be used.
3382 */
3383 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3384 {
3385 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3386 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3387 }
3388
3389 /**
3390 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3391 * for calculating a new rkey for type 2 memory windows.
3392 * @rkey: the rkey to increment.
3393 */
3394 static inline u32 ib_inc_rkey(u32 rkey)
3395 {
3396 const u32 mask = 0x000000ff;
3397 return ((rkey + 1) & mask) | (rkey & ~mask);
3398 }
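/*
 * Example (sketch): before reusing a fast-registration MR, advance the
 * key portion so that stale remote references cannot match the new
 * mapping. my_refresh_key is a hypothetical name.
 */
#if 0	/* example only */
static void my_refresh_key(struct ib_mr *mr)
{
        u32 next = ib_inc_rkey(mr->rkey);

        ib_update_fast_reg_key(mr, next & 0xff);
}
#endif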
3399
3400 /**
3401 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3402 * @pd: The protection domain associated with the unmapped region.
3403 * @mr_access_flags: Specifies the memory access rights.
3404 * @fmr_attr: Attributes of the unmapped region.
3405 *
3406 * A fast memory region must be mapped before it can be used as part of
3407 * a work request.
3408 */
3409 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3410 int mr_access_flags,
3411 struct ib_fmr_attr *fmr_attr);
3412
3413 /**
3414 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3415 * @fmr: The fast memory region to associate with the pages.
3416 * @page_list: An array of physical pages to map to the fast memory region.
3417 * @list_len: The number of pages in page_list.
3418 * @iova: The I/O virtual address to use with the mapped region.
3419 */
3420 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3421 u64 *page_list, int list_len,
3422 u64 iova)
3423 {
3424 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3425 }
3426
3427 /**
3428 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3429 * @fmr_list: A linked list of fast memory regions to unmap.
3430 */
3431 int ib_unmap_fmr(struct list_head *fmr_list);
3432
3433 /**
3434 * ib_dealloc_fmr - Deallocates a fast memory region.
3435 * @fmr: The fast memory region to deallocate.
3436 */
3437 int ib_dealloc_fmr(struct ib_fmr *fmr);
3438
3439 /**
3440 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3441 * @qp: QP to attach to the multicast group. The QP must be type
3442 * IB_QPT_UD.
3443 * @gid: Multicast group GID.
3444 * @lid: Multicast group LID in host byte order.
3445 *
3446 * In order to send and receive multicast packets, subnet
3447 * administration must have created the multicast group and configured
3448 * the fabric appropriately. The port associated with the specified
3449 * QP must also be a member of the multicast group.
3450 */
3451 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3452
3453 /**
3454 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3455 * @qp: QP to detach from the multicast group.
3456 * @gid: Multicast group GID.
3457 * @lid: Multicast group LID in host byte order.
3458 */
3459 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3460
3461 /**
3462 * ib_alloc_xrcd - Allocates an XRC domain.
3463 * @device: The device on which to allocate the XRC domain.
3464 */
3465 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3466
3467 /**
3468 * ib_dealloc_xrcd - Deallocates an XRC domain.
3469 * @xrcd: The XRC domain to deallocate.
3470 */
3471 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3472
3473 struct ib_flow *ib_create_flow(struct ib_qp *qp,
3474 struct ib_flow_attr *flow_attr, int domain);
3475 int ib_destroy_flow(struct ib_flow *flow_id);
3476
3477 static inline int ib_check_mr_access(int flags)
3478 {
3479 /*
3480 * Local write permission is required if remote write or
3481 * remote atomic permission is also requested.
3482 */
3483 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3484 !(flags & IB_ACCESS_LOCAL_WRITE))
3485 return -EINVAL;
3486
3487 return 0;
3488 }
3489
3490 /**
3491 * ib_check_mr_status - lightweight check of MR status.
3492 * This routine may provide status checks on a selected
3493 * ib_mr. First use is for the signature status check.
3494 *
3495 * @mr: A memory region.
3496 * @check_mask: Bitmask of which checks to perform from
3497 * ib_mr_status_check enumeration.
3498 * @mr_status: The container of relevant status checks.
3499 * Failed checks will be indicated in the status bitmask
3500 * and the relevant info shall be in the error item.
3501 */
3502 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3503 struct ib_mr_status *mr_status);
3504
3505 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3506 u16 pkey, const union ib_gid *gid,
3507 const struct sockaddr *addr);
3508 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3509 struct ib_wq_init_attr *init_attr);
3510 int ib_destroy_wq(struct ib_wq *wq);
3511 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3512 u32 wq_attr_mask);
3513 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3514 struct ib_rwq_ind_table_init_attr*
3515 wq_ind_table_init_attr);
3516 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3517
3518 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3519 unsigned int *sg_offset, unsigned int page_size);
3520
3521 static inline int
3522 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3523 unsigned int *sg_offset, unsigned int page_size)
3524 {
3525 int n;
3526
3527 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3528 mr->iova = 0;
3529
3530 return n;
3531 }
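/*
 * Sketch of the usual fast-registration flow (assumption, names
 * hypothetical): allocate an MR, map a DMA-mapped scatterlist into it,
 * then post an IB_WR_REG_MR work request (struct ib_reg_wr, defined
 * earlier in this file) before the transfer that uses the new rkey.
 */
#if 0	/* example only */
static int my_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
                       struct scatterlist *sg, int sg_nents)
{
        struct ib_reg_wr wr;
        struct ib_send_wr *bad;
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n < sg_nents)
                return n < 0 ? n : -EINVAL;

        memset(&wr, 0, sizeof(wr));
        wr.wr.opcode = IB_WR_REG_MR;
        wr.mr        = mr;
        wr.key       = mr->rkey;
        wr.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
        return ib_post_send(qp, &wr.wr, &bad);
}
#endif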
3532
3533 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3534 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3535
3536 void ib_drain_rq(struct ib_qp *qp);
3537 void ib_drain_sq(struct ib_qp *qp);
3538 void ib_drain_qp(struct ib_qp *qp);
3539
3540 int ib_resolve_eth_dmac(struct ib_device *device,
3541 struct rdma_ah_attr *ah_attr);
3542
3543 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3544 {
3545 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3546 return attr->roce.dmac;
3547 return NULL;
3548 }
3549
3550 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3551 {
3552 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3553 attr->ib.dlid = (u16)dlid;
3554 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3555 attr->opa.dlid = dlid;
3556 }
3557
3558 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3559 {
3560 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3561 return attr->ib.dlid;
3562 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3563 return attr->opa.dlid;
3564 return 0;
3565 }
3566
3567 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3568 {
3569 attr->sl = sl;
3570 }
3571
3572 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3573 {
3574 return attr->sl;
3575 }
3576
3577 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3578 u8 src_path_bits)
3579 {
3580 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3581 attr->ib.src_path_bits = src_path_bits;
3582 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3583 attr->opa.src_path_bits = src_path_bits;
3584 }
3585
3586 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3587 {
3588 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3589 return attr->ib.src_path_bits;
3590 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3591 return attr->opa.src_path_bits;
3592 return 0;
3593 }
3594
3595 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3596 {
3597 attr->port_num = port_num;
3598 }
3599
3600 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3601 {
3602 return attr->port_num;
3603 }
3604
3605 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3606 u8 static_rate)
3607 {
3608 attr->static_rate = static_rate;
3609 }
3610
3611 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3612 {
3613 return attr->static_rate;
3614 }
3615
3616 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3617 enum ib_ah_flags flag)
3618 {
3619 attr->ah_flags = flag;
3620 }
3621
3622 static inline enum ib_ah_flags
3623 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3624 {
3625 return attr->ah_flags;
3626 }
3627
3628 static inline const struct ib_global_route
3629 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3630 {
3631 return &attr->grh;
3632 }
3633
3634 /* To retrieve and modify the GRH */
3635 static inline struct ib_global_route
3636 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3637 {
3638 return &attr->grh;
3639 }
3640
3641 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3642 {
3643 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3644
3645 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3646 }
3647
3648 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3649 __be64 prefix)
3650 {
3651 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3652
3653 grh->dgid.global.subnet_prefix = prefix;
3654 }
3655
3656 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3657 __be64 if_id)
3658 {
3659 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3660
3661 grh->dgid.global.interface_id = if_id;
3662 }
3663
3664 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3665 union ib_gid *dgid, u32 flow_label,
3666 u8 sgid_index, u8 hop_limit,
3667 u8 traffic_class)
3668 {
3669 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3670
3671 attr->ah_flags = IB_AH_GRH;
3672 if (dgid)
3673 grh->dgid = *dgid;
3674 grh->flow_label = flow_label;
3675 grh->sgid_index = sgid_index;
3676 grh->hop_limit = hop_limit;
3677 grh->traffic_class = traffic_class;
3678 }
3679
3680 /* Get the AH type */
3681 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3682 u32 port_num)
3683 {
3684 if ((rdma_protocol_roce(dev, port_num)) ||
3685 (rdma_protocol_iwarp(dev, port_num)))
3686 return RDMA_AH_ATTR_TYPE_ROCE;
3687 else if ((rdma_protocol_ib(dev, port_num)) &&
3688 (rdma_cap_opa_ah(dev, port_num)))
3689 return RDMA_AH_ATTR_TYPE_OPA;
3690 else
3691 return RDMA_AH_ATTR_TYPE_IB;
3692 }
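/*
 * Example (sketch): building an address-handle attribute with the
 * accessors above instead of poking union members directly. my_make_ah
 * is a hypothetical name and the SL/DLID values are arbitrary.
 */
#if 0	/* example only */
static struct ib_ah *my_make_ah(struct ib_pd *pd, u8 port_num, u32 dlid)
{
        struct rdma_ah_attr attr = {};

        attr.type = rdma_ah_find_type(pd->device, port_num);
        rdma_ah_set_port_num(&attr, port_num);
        rdma_ah_set_sl(&attr, 0);
        rdma_ah_set_dlid(&attr, dlid);  /* no-op for RoCE AH types */

        return rdma_create_ah(pd, &attr);
}
#endif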
3693 #endif /* IB_VERBS_H */