/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_MANAGED_FLOW_STEERING	= (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30)
};

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
};
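
/*
 * Example (illustrative sketch, not part of this header's API surface):
 * querying device attributes and testing a capability bit.  Assumes a
 * valid "struct ib_device *dev", e.g. from an ib_client add callback;
 * ib_query_device() is declared later in this file.
 *
 *	struct ib_device_attr attr;
 *
 *	if (!ib_query_device(dev, &attr) &&
 *	    (attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 *		pr_info("%s supports fast registration work requests\n",
 *			dev->name);
 */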

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};
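
/*
 * Example (illustrative sketch): combining ib_query_port() with
 * ib_width_enum_to_int() to report the active link width.  Assumes a
 * valid "struct ib_device *dev" and a port number "port";
 * ib_query_port() is declared later in this file.
 *
 *	struct ib_port_attr pattr;
 *
 *	if (!ib_query_port(dev, port, &pattr))
 *		pr_info("port %u active width: %dX\n", port,
 *			ib_width_enum_to_int(pattr.active_width));
 */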

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
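
/*
 * Example (illustrative sketch): initializing and registering an
 * asynchronous event handler.  "my_event_handler" and "my_handler" are
 * hypothetical names; ib_register_event_handler() is declared later in
 * this file.
 *
 *	static void my_handler(struct ib_event_handler *handler,
 *			       struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n", event->event,
 *			event->device->name);
 *	}
 *
 *	static struct ib_event_handler my_event_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_event_handler, device, my_handler);
 *	ib_register_event_handler(&my_event_handler);
 */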

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS	 = 11,
	IB_RATE_56_GBPS	 = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS	 = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

enum ib_mr_create_flags {
	IB_MR_SIGNATURE_EN = 1,
};

/**
 * ib_mr_init_attr - Memory region init attributes passed to routine
 * ib_create_mr.
 * @max_reg_descriptors: max number of registration descriptors that
 *   may be used with registration work requests.
 * @flags: MR creation flags bit mask.
 */
struct ib_mr_init_attr {
	int		max_reg_descriptors;
	u32		flags;
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *   domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *   be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
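
/*
 * Example (illustrative sketch): describing a signature handover where
 * memory is unprotected and the wire carries T10-DIF with a CRC block
 * guard over 512-byte intervals.  The field values are hypothetical.
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.mem.sig_type		= IB_SIG_TYPE_NONE,
 *		.wire = {
 *			.sig_type	= IB_SIG_TYPE_T10_DIF,
 *			.sig.dif = {
 *				.bg_type	= IB_T10DIF_CRC,
 *				.pi_interval	= 512,
 *				.ref_tag	= 0x1234,
 *			},
 *		},
 *	};
 */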

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_FAST_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
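
/*
 * Example (illustrative sketch): distinguishing receive completions
 * from send-side completions while draining a CQ.  Assumes a valid
 * "struct ib_cq *cq"; ib_poll_cq() is defined later in this file, and
 * handle_recv()/handle_send() are hypothetical helpers.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_recv(&wc);
 *		else
 *			handle_send(&wc);
 *	}
 */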

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};


/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_SMAC			= (1<<21),
	IB_QP_ALT_SMAC			= (1<<22),
	IB_QP_VID			= (1<<23),
	IB_QP_ALT_VID			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u8			smac[ETH_ALEN];
	u8			alt_smac[ETH_ALEN];
	u16			vlan_id;
	u16			alt_vlan_id;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_FAST_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_fast_reg_page_list {
	struct ib_device       *device;
	u64		       *page_list;
	unsigned int		max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u64	compare_add_mask;
			u64	swap_mask;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			void   *header;
			int	hlen;
			int	mss;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
		struct {
			u64				iova_start;
			struct ib_fast_reg_page_list   *page_list;
			unsigned int			page_shift;
			unsigned int			page_list_len;
			u32				length;
			int				access_flags;
			u32				rkey;
		} fast_reg;
		struct {
			struct ib_mw            *mw;
			/* The new rkey for the memory window. */
			u32                      rkey;
			struct ib_mw_bind_info   bind_info;
		} bind_mw;
		struct {
			struct ib_sig_attrs    *sig_attrs;
			struct ib_mr	       *sig_mr;
			int			access_flags;
			struct ib_sge	       *prot;
		} sig_handover;
	} wr;
	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64                    wr_id;
	int                    send_flags;
	struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user	  *outbuf;
	size_t		   inlen;
	size_t		   outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void                   *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp           *real_qp;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type	  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type	 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type	   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type	      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* Following are the optional layers according to user request
	 * struct ib_flow_spec_xxx
	 * struct ib_flow_spec_yyy
	 */
};
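
/*
 * Example (illustrative sketch): the specs counted by num_of_specs are
 * laid out in memory immediately after struct ib_flow_attr.  A caller
 * building a rule that matches a single destination MAC might do the
 * following (error handling omitted; values are hypothetical, and
 * ib_create_flow() is declared further down in this file):
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} rule = {
 *		.attr = {
 *			.type		= IB_FLOW_ATTR_NORMAL,
 *			.size		= sizeof(rule),
 *			.num_of_specs	= 1,
 *			.port		= 1,
 *		},
 *		.eth = {
 *			.type		= IB_FLOW_SPEC_ETH,
 *			.size		= sizeof(rule.eth),
 *			.val.dst_mac	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *			.mask.dst_mac	= { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *		},
 *	};
 *
 *	flow = ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
 */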

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_uobject	*uobject;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};
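
/*
 * The ib_dma_* consumer helpers (defined further down in this file)
 * dispatch through dma_ops when a device provides one and fall back to
 * the generic DMA API otherwise.  A minimal sketch of that pattern, for
 * illustration only ("example_dma_mapping_error" is a hypothetical
 * name):
 *
 *	static inline int example_dma_mapping_error(struct ib_device *dev,
 *						    u64 dma_addr)
 *	{
 *		if (dev->dma_ops)
 *			return dev->dma_ops->mapping_error(dev, dma_addr);
 *		return dma_mapping_error(dev->dma_device, dma_addr);
 *	}
 */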

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	spinlock_t                    client_data_lock;
	struct list_head              core_list;
	struct list_head              client_data_list;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*get_protocol_stats)(struct ib_device *device,
							 union rdma_protocol_stats *stats);
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*destroy_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*create_mr)(struct ib_pd *pd,
						struct ib_mr_init_attr *mr_init_attr);
	struct ib_mr *		   (*alloc_fast_reg_mr)(struct ib_pd *pd,
							int max_page_list_len);
	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
								   int page_list_len);
	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct device                dev;
	struct kobject              *ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			     uverbs_abi_ver;
	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[64];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
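
/*
 * Example (illustrative sketch): verbs allocation calls return an
 * ERR_PTR() value rather than NULL on failure, so check the result with
 * IS_ERR()/PTR_ERR().  Assumes a valid "struct ib_device *device".
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */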

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
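
/*
 * Example (illustrative sketch): creating an RC QP.  Assumes a valid
 * PD and CQ; the capacity values are hypothetical and may be rounded up
 * by the driver, so re-read qp_init_attr.cap after a successful call.
 *
 *	struct ib_qp_init_attr qp_init_attr = {
 *		.send_cq	= cq,
 *		.recv_cq	= cq,
 *		.cap = {
 *			.max_send_wr	= 16,
 *			.max_recv_wr	= 16,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */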

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
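
/*
 * Example (illustrative sketch): moving a freshly created RC QP from
 * RESET to INIT.  The port, pkey index, and access flags shown are
 * hypothetical; the mask must name exactly the attributes supplied.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */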

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);
1905
d3d72d90 1906/**
0e0ec7e0
SH
1907 * ib_open_qp - Obtain a reference to an existing sharable QP.
1908 * @xrcd - XRC domain
1909 * @qp_open_attr: Attributes identifying the QP to open.
1910 *
1911 * Returns a reference to a sharable QP.
1912 */
1913struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1914 struct ib_qp_open_attr *qp_open_attr);
1915
1916/**
1917 * ib_close_qp - Release an external reference to a QP.
d3d72d90
SH
1918 * @qp: The QP handle to release
1919 *
0e0ec7e0
SH
1920 * The opened QP handle is released by the caller. The underlying
1921 * shared QP is not destroyed until all internal references are released.
d3d72d90 1922 */
0e0ec7e0 1923int ib_close_qp(struct ib_qp *qp);
d3d72d90 1924

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}
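
/*
 * Example (editorial sketch): posting a single signaled send.  The DMA
 * address, length and lkey are assumed to describe an already registered
 * buffer; the wr_id value is hypothetical.
 */
static inline int example_post_send(struct ib_qp *qp, u64 dma_addr,
                                    u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = 1,
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
        }, *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}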

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);
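
/*
 * Example (editorial sketch): creating a CQ with a completion handler.
 * The handler body, depth and context are hypothetical; completion
 * vector 0 is always available.
 */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
        /* Typically: re-arm with ib_req_notify_cq() and drain the CQ. */
}

static inline struct ib_cq *example_create_cq(struct ib_device *device,
                                              void *ctx)
{
        return ib_create_cq(device, example_comp_handler, NULL, ctx,
                            128 /* minimum CQE count */, 0);
}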

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}
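
/*
 * Example (editorial sketch): draining a CQ in batches and counting
 * failed completions.  The batch size of 16 is hypothetical.
 */
static inline int example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[16];
        int n, i, errors = 0;

        while ((n = ib_poll_cq(cq, 16, wc)) > 0)
                for (i = 0; i < n; i++)
                        if (wc[i].status != IB_WC_SUCCESS)
                                errors++;

        return n < 0 ? n : errors;
}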

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
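
/*
 * Example (editorial sketch): the race-free poll/re-arm pattern
 * described above, using the hypothetical example_drain_cq() defined
 * earlier as the consumer's poll loop.
 */
static inline void example_rearm_and_drain(struct ib_cq *cq)
{
        /* Drain whatever is already queued. */
        example_drain_cq(cq);

        /*
         * Re-arm.  A positive return means an entry may have slipped
         * in between the poll and the request, so poll once more.
         */
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
                example_drain_cq(cq);
}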

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
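
/*
 * Example (editorial sketch): mapping a kernel buffer for device reads
 * and checking the result before using it in an SGE.  Returning 0 on
 * failure is a hypothetical convention of this sketch.
 */
static inline u64 example_map_buffer(struct ib_device *dev, void *buf,
                                     size_t len)
{
        u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (ib_dma_mapping_error(dev, addr))
                return 0;       /* caller treats 0 as failure here */
        return addr;
}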

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}

/* dma_attrs variants of ib_dma_map_single()/ib_dma_unmap_single(). */
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
                                      direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}
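
/*
 * Example (editorial sketch): mapping a caller-initialized scatterlist
 * and walking the mapped entries.  sg_init_table()/sg_set_page() setup
 * is assumed to have happened already; the error convention is
 * hypothetical.
 */
static inline int example_map_sg(struct ib_device *dev,
                                 struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i, mapped;

        mapped = ib_dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
        if (!mapped)
                return -ENOMEM; /* zero mapped entries means failure */

        for_each_sg(sgl, sg, mapped, i)
                ; /* sg_dma_address(sg) / sg_dma_len(sg) fill the SGEs */

        return mapped;
}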

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/* dma_attrs variants of ib_dma_map_sg()/ib_dma_unmap_sg(). */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  TODO: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  TODO: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister of the memory region
 *   followed by a register of the physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_create_mr - Allocates a memory region that may be used for
 *   signature handover operations.
 * @pd: The protection domain associated with the region.
 * @mr_init_attr: memory region init attributes.
 */
struct ib_mr *ib_create_mr(struct ib_pd *pd,
                           struct ib_mr_init_attr *mr_init_attr);

/**
 * ib_destroy_mr - Destroys a memory region that was created using
 *   ib_create_mr and removes it from HW translation tables.
 * @mr: The memory region to destroy.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_destroy_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WC_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
                                struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 *   for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
        const u32 mask = 0x000000ff;

        return ((rkey + 1) & mask) | (rkey & ~mask);
}
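
/*
 * Example (editorial sketch): generating a fresh key before reusing a
 * fast-reg MR, so that stale remote references cannot match the new
 * registration.  Using the rkey's low byte as the rolling counter is a
 * convention of this sketch, not mandated by the API.
 */
static inline u32 example_refresh_mr_key(struct ib_mr *mr)
{
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
        return mr->rkey;
}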

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 * @type: The type of the memory window (1 or 2).
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 *
 * If there is no immediate error, the function will update the rkey member
 * of the mw parameter to its new value.  The bind operation can still fail
 * asynchronously.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

/* Flow steering: attach/detach a flow specification to/from a QP. */
struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        return 0;
}
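
/*
 * Example (editorial sketch): validating access flags before a
 * registration call.  The combination shown passes the check above
 * because local write accompanies remote write.
 */
static inline int example_access_flags(void)
{
        int flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

        return ib_check_mr_access(flags);      /* returns 0 */
}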

/**
 * ib_check_mr_status - lightweight check of MR status.
 *   This routine may provide status checks on a selected
 *   ib_mr.  Its first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *   ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *   Failed checks will be indicated in the status bitmask
 *   and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status);

#endif /* IB_VERBS_H */