1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2020 Intel Corporation */
/*
 * Opaque-handle and scalar-type aliases for the irdma user-level verbs
 * interface.  These are preprocessor aliases (not typedefs), matching the
 * header's original convention; u8/u32/u64 are kernel integer types.
 * Fix: removed the stray line-number tokens (extraction artifacts) that
 * made every directive syntactically invalid.
 */
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
/* NOTE: as a macro, "irdma_sgl a, b;" declares only one pointer. */
#define irdma_sgl struct irdma_sge *
/* Maximum supported memory-region size, in bytes. */
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL

/*
 * STag access-privilege flag bits.  The combined REMOTEREAD/REMOTEWRITE
 * values also carry the corresponding local bit (0x05 = LOCALREAD |
 * REMOTEREAD_ONLY, 0x0a = LOCALWRITE | REMOTEWRITE_ONLY).
 * Fix: removed stray line-number tokens (extraction artifacts).
 */
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
#define IRDMA_ACCESS_FLAGS_ALL 0x3f
/*
 * WQE/CQE operation-type codes.  The numbering has intentional gaps
 * (0x02 and 0x07 are unused) and RDMA_WRITE_SOL (0x0d) is listed with
 * the other send/write variants as in the original header.
 * Fix: removed stray line-number tokens (extraction artifacts).
 */
#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
#define IRDMA_OP_TYPE_RDMA_READ 0x01
#define IRDMA_OP_TYPE_SEND 0x03
#define IRDMA_OP_TYPE_SEND_INV 0x04
#define IRDMA_OP_TYPE_SEND_SOL 0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
#define IRDMA_OP_TYPE_BIND_MW 0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f

/* Major error code reported for flushed completions. */
#define IRDMA_FLUSH_MAJOR_ERR 1
/*
 * Device limits and on-chip buffer-size constants for the irdma provider.
 * Sizes named *_SIZE near the top are in quanta/units used by the CQP,
 * the *_BUF_SIZE values are in bytes, and the MIN/MAX pairs bound IDs
 * and queue depths.
 * NOTE(review): this span is a damaged extraction -- each line carries a
 * stray original-line-number token, and several original lines are
 * missing (e.g. the IRDMA_CQE_SIZE member referenced later in this file,
 * and the enum's closing brace).  Restore from the upstream header; do
 * not compile as-is.
 */
50 enum irdma_device_caps_const
{
52 IRDMA_CQP_WQE_SIZE
= 8,
54 IRDMA_EXTENDED_CQE_SIZE
= 8,
57 IRDMA_CQP_CTX_SIZE
= 8,
58 IRDMA_SHADOW_AREA_SIZE
= 8,
59 IRDMA_QUERY_FPM_BUF_SIZE
= 176,
60 IRDMA_COMMIT_FPM_BUF_SIZE
= 176,
61 IRDMA_GATHER_STATS_BUF_SIZE
= 1024,
62 IRDMA_MIN_IW_QP_ID
= 0,
63 IRDMA_MAX_IW_QP_ID
= 262143,
65 IRDMA_MAX_CEQID
= 1023,
/* CEQ count is derived: highest CEQ id + 1. */
66 IRDMA_CEQ_MAX_COUNT
= IRDMA_MAX_CEQID
+ 1,
68 IRDMA_MAX_CQID
= 524287,
69 IRDMA_MIN_AEQ_ENTRIES
= 1,
70 IRDMA_MAX_AEQ_ENTRIES
= 524287,
71 IRDMA_MIN_CEQ_ENTRIES
= 1,
72 IRDMA_MAX_CEQ_ENTRIES
= 262143,
73 IRDMA_MIN_CQ_SIZE
= 1,
74 IRDMA_MAX_CQ_SIZE
= 1048575,
76 IRDMA_MAX_WQ_FRAGMENT_COUNT
= 13,
77 IRDMA_MAX_SGE_RD
= 13,
/* Message-size limits are 2 GiB - 1 (INT32_MAX). */
78 IRDMA_MAX_OUTBOUND_MSG_SIZE
= 2147483647,
79 IRDMA_MAX_INBOUND_MSG_SIZE
= 2147483647,
80 IRDMA_MAX_PUSH_PAGE_COUNT
= 1024,
81 IRDMA_MAX_PE_ENA_VF_COUNT
= 32,
82 IRDMA_MAX_VF_FPM_ID
= 47,
83 IRDMA_MAX_SQ_PAYLOAD_SIZE
= 2145386496,
84 IRDMA_MAX_INLINE_DATA_SIZE
= 101,
85 IRDMA_MAX_WQ_ENTRIES
= 32768,
86 IRDMA_Q2_BUF_SIZE
= 256,
87 IRDMA_QP_CTX_SIZE
= 256,
88 IRDMA_MAX_PDS
= 262144,
/*
 * Addressing model for memory windows / tagged offsets: either
 * zero-based (offset from the start of the region) or full virtual
 * address.  Fix: removed stray line-number tokens and restored the
 * closing brace dropped by extraction.
 */
enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED = 1,
};
/*
 * Flush-completion opcodes reported for flushed WRs.
 * NOTE(review): only one enumerator survived extraction here; the
 * remaining members and the closing brace are missing -- restore from
 * the upstream header.
 */
96 enum irdma_flush_opcode
{
100 FLUSH_REM_ACCESS_ERR
,
/*
 * Completion status codes reported in irdma_cq_poll_info::comp_status.
 * All values after IRDMA_COMPL_STATUS_SUCCESS (0) are sequential.
 * Fix: removed stray line-number tokens and restored the closing brace
 * dropped by extraction; enumerators and order are unchanged.
 */
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};
/*
 * CQ arm modes passed to irdma_uk_cq_request_notification():
 * notify on any completion, or only on solicited completions.
 * Fix: removed stray line-number tokens and restored the closing brace
 * dropped by extraction.
 */
enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};
/*
 * NOTE(review): these two enumerators (write/send with immediate data)
 * belong to an enum whose opening line and remaining members were lost
 * in extraction -- restore the enclosing enum from the upstream header.
 */
143 IRDMA_WRITE_WITH_IMM
= 1,
144 IRDMA_SEND_WITH_IMM
= 2,
/*
 * Forward declarations and per-operation work-request info structures.
 * NOTE(review): damaged extraction -- every line carries a stray
 * original-line-number token and most struct fields, plus every closing
 * brace, are missing.  The comments below describe only what is visible.
 */
151 struct irdma_qp_uk_init_info
;
152 struct irdma_cq_uk_init_info
;
/* Fragment of a scatter/gather element: tagged offset (TO) field. */
155 irdma_tagged_offset tag_off
;
/* Fragment of struct irdma_cqe (opening line missing): raw CQE words. */
167 __le64 buf
[IRDMA_CQE_SIZE
];
/* Extended (double-size) CQE: raw little-endian 64-bit words. */
170 struct irdma_extended_cqe
{
171 __le64 buf
[IRDMA_EXTENDED_CQE_SIZE
];
/* Op-info for a send posted via SGL (fields missing here). */
174 struct irdma_post_send
{
/* Op-info for a send with inline payload (fields missing here). */
182 struct irdma_post_inline_send
{
/* Op-info for a receive WR (fields missing here). */
190 struct irdma_post_rq_info
{
/* RDMA write: local SGL plus remote address/STag. */
196 struct irdma_rdma_write
{
197 irdma_sgl lo_sg_list
;
199 struct irdma_sge rem_addr
;
/* RDMA write with inline payload: remote address/STag. */
202 struct irdma_inline_rdma_write
{
205 struct irdma_sge rem_addr
;
/* RDMA read: local SGL to land data plus remote address/STag. */
208 struct irdma_rdma_read
{
209 irdma_sgl lo_sg_list
;
211 struct irdma_sge rem_addr
;
/* Memory-window bind parameters (most fields missing here). */
214 struct irdma_bind_window
{
218 enum irdma_addressing_type addressing_type
;
/* Type-1 (vs type-2) memory window selector. */
222 bool mem_window_type_1
:1;
/* Local STag invalidate: the STag to invalidate. */
225 struct irdma_inv_local_stag
{
226 irdma_stag target_stag
;
/*
 * Per-SQ-WR descriptor passed to the irdma_uk_* post routines; the
 * op_info union below selects the member matching the opcode.
 */
229 struct irdma_post_sq_info
{
237 bool imm_data_valid
:1;
245 struct irdma_post_send send
;
246 struct irdma_rdma_write rdma_write
;
247 struct irdma_rdma_read rdma_read
;
248 struct irdma_bind_window bind_window
;
249 struct irdma_inv_local_stag inv_local_stag
;
250 struct irdma_inline_rdma_write inline_rdma_write
;
251 struct irdma_post_inline_send inline_send
;
/* Result of polling one completion from a CQ (fields missing here). */
255 struct irdma_cq_poll_info
{
257 irdma_qp_handle qp_handle
;
263 irdma_stag inv_stag
; /* or L_R_Key */
264 enum irdma_cmpl_status comp_status
;
270 bool stag_invalid_set
:1; /* or L_R_Key set */
273 bool solicited_event
:1;
275 bool ud_vlan_valid
:1;
276 bool ud_smac_valid
:1;
/*
 * User-level (uk) work-request posting API.  Each routine builds the
 * corresponding WQE on the QP's SQ or RQ from a post_sq/post_rq info
 * descriptor and returns an irdma_status_code.
 * NOTE(review): damaged extraction -- stray line-number tokens remain
 * and several prototypes lost their trailing parameters (typically
 * "bool post_sq") and closing ");".  Restore from the upstream header.
 */
280 enum irdma_status_code
irdma_uk_inline_rdma_write(struct irdma_qp_uk
*qp
,
281 struct irdma_post_sq_info
*info
,
283 enum irdma_status_code
irdma_uk_inline_send(struct irdma_qp_uk
*qp
,
284 struct irdma_post_sq_info
*info
,
286 enum irdma_status_code
irdma_uk_mw_bind(struct irdma_qp_uk
*qp
,
287 struct irdma_post_sq_info
*info
,
289 enum irdma_status_code
irdma_uk_post_nop(struct irdma_qp_uk
*qp
, u64 wr_id
,
290 bool signaled
, bool post_sq
);
291 enum irdma_status_code
irdma_uk_post_receive(struct irdma_qp_uk
*qp
,
292 struct irdma_post_rq_info
*info
);
/* Ring the doorbell for previously built (un-posted) WQEs. */
293 void irdma_uk_qp_post_wr(struct irdma_qp_uk
*qp
);
294 enum irdma_status_code
irdma_uk_rdma_read(struct irdma_qp_uk
*qp
,
295 struct irdma_post_sq_info
*info
,
296 bool inv_stag
, bool post_sq
);
297 enum irdma_status_code
irdma_uk_rdma_write(struct irdma_qp_uk
*qp
,
298 struct irdma_post_sq_info
*info
,
300 enum irdma_status_code
irdma_uk_send(struct irdma_qp_uk
*qp
,
301 struct irdma_post_sq_info
*info
, bool post_sq
);
302 enum irdma_status_code
irdma_uk_stag_local_invalidate(struct irdma_qp_uk
*qp
,
303 struct irdma_post_sq_info
*info
,
/*
 * Hardware-generation-specific WQE build callbacks; the uk layer calls
 * through this vtable so the same posting code serves multiple device
 * generations.
 * NOTE(review): iw_set_fragment's trailing parameter(s) and ");" were
 * lost in extraction, and the struct's closing brace is missing.
 */
306 struct irdma_wqe_uk_ops
{
/* Copy inline payload into a WQE with the given validity polarity. */
307 void (*iw_copy_inline_data
)(u8
*dest
, u8
*src
, u32 len
, u8 polarity
);
/* Convert an inline data size to the number of WQE quanta needed. */
308 u16 (*iw_inline_data_size_to_quanta
)(u32 data_size
);
/* Write one SGE into a WQE at the given fragment offset. */
309 void (*iw_set_fragment
)(__le64
*wqe
, u32 offset
, struct irdma_sge
*sge
,
/* Fill a memory-window bind WQE from bind_window op-info. */
311 void (*iw_set_mw_bind_wqe
)(__le64
*wqe
,
312 struct irdma_bind_window
*op_info
);
/*
 * CQ polling/arming and QP/CQ initialization entry points, followed by
 * SQ work-request tracking and WQE quantum definitions.
 * NOTE(review): stray line-number tokens remain from extraction, and the
 * bodies of irdma_sq_uk_wr_trk_info / irdma_qp_quanta below are partly
 * missing (fields and closing braces dropped).
 */
315 enum irdma_status_code
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk
*cq
,
316 struct irdma_cq_poll_info
*info
);
317 void irdma_uk_cq_request_notification(struct irdma_cq_uk
*cq
,
318 enum irdma_cmpl_notify cq_notify
);
319 void irdma_uk_cq_resize(struct irdma_cq_uk
*cq
, void *cq_base
, int size
);
/* NOTE(review): parameter is named *qp but typed irdma_cq_uk -- looks
 * like a naming slip in the original; confirm against the .c file. */
320 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk
*qp
, u16 cnt
);
321 enum irdma_status_code
irdma_uk_cq_init(struct irdma_cq_uk
*cq
,
322 struct irdma_cq_uk_init_info
*info
);
323 enum irdma_status_code
irdma_uk_qp_init(struct irdma_qp_uk
*qp
,
324 struct irdma_qp_uk_init_info
*info
);
/* Per-SQ-slot tracking of posted work requests (fields missing here). */
325 struct irdma_sq_uk_wr_trk_info
{
/* One WQE quantum: IRDMA_WQE_SIZE little-endian 64-bit words. */
332 struct irdma_qp_quanta
{
333 __le64 elem
[IRDMA_WQE_SIZE
];
/*
 * Fields of the user-level QP and CQ context structures and their
 * init-info counterparts.
 * NOTE(review): the opening lines "struct irdma_qp_uk {" and
 * "struct irdma_cq_uk {" were lost in extraction, along with many
 * fields and all closing braces; stray line-number tokens remain.
 */
/* SQ and RQ WQE arrays and device attributes (struct irdma_qp_uk). */
337 struct irdma_qp_quanta
*sq_base
;
338 struct irdma_qp_quanta
*rq_base
;
339 struct irdma_uk_attrs
*uk_attrs
;
/* Memory-mapped doorbell register for WQE allocation. */
340 u32 __iomem
*wqe_alloc_db
;
341 struct irdma_sq_uk_wr_trk_info
*sq_wrtrk_array
;
/* Producer/consumer ring state for SQ, RQ, and the initial ring. */
346 struct irdma_ring sq_ring
;
347 struct irdma_ring rq_ring
;
348 struct irdma_ring initial_ring
;
/* Generation-specific WQE build callbacks (by value, not pointer). */
356 struct irdma_wqe_uk_ops wqe_ops
;
360 u8 swqe_polarity_deferred
;
363 u8 rq_wqe_size_multiplier
;
364 bool deferred_flag
:1;
365 bool push_mode
:1; /* whether the last post wqe was pushed */
368 bool sq_flush_complete
:1; /* Indicates flush was seen and SQ was empty after the flush */
369 bool rq_flush_complete
:1; /* Indicates flush was seen and RQ was empty after the flush */
370 bool destroy_pending
:1; /* Indicates the QP is being destroyed */
/* CQE array and doorbells (struct irdma_cq_uk; opening line missing). */
379 struct irdma_cqe
*cq_base
;
380 u32 __iomem
*cqe_alloc_db
;
381 u32 __iomem
*cq_ack_db
;
385 struct irdma_ring cq_ring
;
387 bool avoid_mem_cflct
:1;
/* Caller-supplied parameters for irdma_uk_qp_init(). */
390 struct irdma_qp_uk_init_info
{
391 struct irdma_qp_quanta
*sq
;
392 struct irdma_qp_quanta
*rq
;
393 struct irdma_uk_attrs
*uk_attrs
;
394 u32 __iomem
*wqe_alloc_db
;
396 struct irdma_sq_uk_wr_trk_info
*sq_wrtrk_array
;
/* Caller-supplied parameters for irdma_uk_cq_init(). */
411 struct irdma_cq_uk_init_info
{
412 u32 __iomem
*cqe_alloc_db
;
413 u32 __iomem
*cq_ack_db
;
414 struct irdma_cqe
*cq_base
;
/* Plain bool here, unlike the :1 bitfield in the QP/CQ structs. */
418 bool avoid_mem_cflct
;
/*
 * Internal helpers: WQE slot allocation, CQ cleanup, quanta/WQE-size
 * conversion, shift/depth computation, and push-mode support.
 * NOTE(review): stray line-number tokens remain from extraction; the
 * prototypes below otherwise appear complete.
 */
/* Reserve the next SQ WQE (quanta slots); returns its address and index. */
421 __le64
*irdma_qp_get_next_send_wqe(struct irdma_qp_uk
*qp
, u32
*wqe_idx
,
422 u16 quanta
, u32 total_size
,
423 struct irdma_post_sq_info
*info
);
/* Reserve the next RQ WQE; returns its address and index. */
424 __le64
*irdma_qp_get_next_recv_wqe(struct irdma_qp_uk
*qp
, u32
*wqe_idx
);
425 void irdma_uk_clean_cq(void *q
, struct irdma_cq_uk
*cq
);
426 enum irdma_status_code
irdma_nop(struct irdma_qp_uk
*qp
, u64 wr_id
,
427 bool signaled
, bool post_sq
);
/* Map a fragment (SGE) count to SQ quanta / RQ WQE size via out-params. */
428 enum irdma_status_code
irdma_fragcnt_to_quanta_sq(u32 frag_cnt
, u16
*quanta
);
429 enum irdma_status_code
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt
, u16
*wqe_size
);
/* Compute the WQE size shift for the given SGE/inline-data limits. */
430 void irdma_get_wqe_shift(struct irdma_uk_attrs
*uk_attrs
, u32 sge
,
431 u32 inline_data
, u8
*shift
);
/* Compute SQ/RQ depth from requested size and shift via *wqdepth. */
432 enum irdma_status_code
irdma_get_sqdepth(struct irdma_uk_attrs
*uk_attrs
,
433 u32 sq_size
, u8 shift
, u32
*wqdepth
);
434 enum irdma_status_code
irdma_get_rqdepth(struct irdma_uk_attrs
*uk_attrs
,
435 u32 rq_size
, u8 shift
, u32
*wqdepth
);
/* Copy a WQE to the device push page (low-latency doorbell-less post). */
436 void irdma_qp_push_wqe(struct irdma_qp_uk
*qp
, __le64
*wqe
, u16 quanta
,
437 u32 wqe_idx
, bool post_sq
);
438 void irdma_clr_wqes(struct irdma_qp_uk
*qp
, u32 qp_wqe_idx
);
439 #endif /* IRDMA_USER_H */