2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Description: Fast Path Operators (header)
39 #ifndef __BNXT_QPLIB_FP_H__
40 #define __BNXT_QPLIB_FP_H__
/* Scatter/gather element describing one data segment of a WQE.
 * NOTE(review): the member list appears truncated in this extract —
 * confirm against the full header (expect address/lkey/size fields).
 */
42 struct bnxt_qplib_sge
{
/* SQ WQE geometry: max entry size, entries per PAGE_SIZE page, and the
 * index-within-page mask derived from it (count is a power of two).
 */
48 #define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
50 #define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
51 #define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
53 static inline u32
get_sqe_pg(u32 val
)
55 return ((val
& ~SQE_MAX_IDX_PER_PG
) / SQE_CNT_PER_PG
);
58 static inline u32
get_sqe_idx(u32 val
)
60 return (val
& SQE_MAX_IDX_PER_PG
);
/* PSN-search area geometry: entry size, entries per page, in-page mask. */
63 #define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
65 #define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
66 #define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
68 static inline u32
get_psne_pg(u32 val
)
70 return ((val
& ~PSNE_MAX_IDX_PER_PG
) / PSNE_CNT_PER_PG
);
73 static inline u32
get_psne_idx(u32 val
)
75 return (val
& PSNE_MAX_IDX_PER_PG
);
/* Maximum number of SGEs this driver supports per WQE. */
78 #define BNXT_QPLIB_QP_MAX_SGL 6
/* Software shadow of one posted SQ WQE; keeps a pointer to the entry's
 * slot in the PSN search area.
 * NOTE(review): other members appear truncated in this extract — confirm
 * against the full header.
 */
80 struct bnxt_qplib_swq
{
86 struct sq_psn_search
*psn_search
;
/* HW-independent description of a work request handed to the fast path.
 * NOTE(review): several union members appear truncated in this extract —
 * confirm layout against the full header.
 */
89 struct bnxt_qplib_swqe
{
/* Sentinel wr_id used for internally generated fence WQEs ("FENC"). */
91 #define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
/* WQE opcode types (values track the HW command encoding). */
95 #define BNXT_QPLIB_SWQE_TYPE_SEND 0
96 #define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
97 #define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
98 #define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
99 #define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
100 #define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
101 #define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
102 #define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
103 #define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
104 #define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
105 #define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
106 #define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
107 #define BNXT_QPLIB_SWQE_TYPE_RECV 128
108 #define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
/* Per-WQE flag bits. */
110 #define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP BIT(0)
111 #define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE BIT(1)
112 #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
113 #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
114 #define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
115 struct bnxt_qplib_sge sg_list
[BNXT_QPLIB_QP_MAX_SGL
];
117 /* Max inline data is 96 bytes */
119 #define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH 96
120 u8 inline_data
[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH
];
123 /* Send, with imm, inval key */
134 /* Send Raw Ethernet and QP1 */
141 /* RDMA write, with imm, read */
151 /* Atomic cmp/swap, fetch/add */
159 /* Local Invalidate */
/* Page-size encodings used by the FRMR/reg-MR WQE. */
172 #define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
173 #define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
174 #define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
175 #define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
176 #define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
177 #define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
178 #define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
179 #define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
181 #define PAGE_SHIFT_4K 12
183 dma_addr_t pbl_dma_ptr
;
/* Memory-window bind access flag bits. */
192 #define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE BIT(0)
193 #define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ BIT(1)
194 #define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE BIT(2)
195 #define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC BIT(3)
196 #define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND BIT(4)
/* RQ WQE geometry: entry size, entries per page, and the page/index
 * helper macros (same scheme as the SQE/PSNE helpers above).
 */
207 #define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
209 #define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
210 #define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
211 #define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
212 #define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
/* One work queue (SQ or RQ): the HW ring plus the software WQE shadow
 * array and the user-buffer scatterlist.
 * NOTE(review): several members appear truncated in this extract.
 */
214 struct bnxt_qplib_q
{
215 struct bnxt_qplib_hwq hwq
;
216 struct bnxt_qplib_swq
*swq
;
217 struct scatterlist
*sglist
;
/* Set while completions for this queue are being flushed. */
223 bool flush_in_progress
;
/* Fast-path QP context: PD/DPI handles, SQ/RQ, attached CQs/SRQ, the
 * IRRQ/ORRQ rings for RDMA read/atomic responder resources, and DMA
 * mappings for the QP1 header buffers.
 * NOTE(review): many members appear truncated in this extract — confirm
 * against the full header.
 */
232 struct bnxt_qplib_qp
{
233 struct bnxt_qplib_pd
*pd
;
234 struct bnxt_qplib_dpi
*dpi
;
245 bool en_sqd_async_notify
;
256 u32 max_dest_rd_atomic
;
261 struct bnxt_qplib_ah ah
;
/* PSNs are 24-bit quantities in the BTH. */
263 #define BTH_PSN_MASK ((1 << 24) - 1)
265 struct bnxt_qplib_q sq
;
267 struct bnxt_qplib_q rq
;
269 struct bnxt_qplib_srq
*srq
;
271 struct bnxt_qplib_cq
*scq
;
272 struct bnxt_qplib_cq
*rcq
;
274 struct bnxt_qplib_hwq irrq
;
275 struct bnxt_qplib_hwq orrq
;
276 /* Header buffer for QP1 */
280 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
281 * and ib_bth + ib_deth (20).
282 * Max required is 82 when RoCE V2 is enabled
284 #define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
285 /* Ethernet header = 14 */
286 /* ib_grh = 40 (provided by MAD) */
287 /* ib_bth + ib_deth = 20 */
288 /* MAD = 256 (provided by MAD) */
290 #define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
291 #define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
292 #define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
293 #define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
294 #define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
296 dma_addr_t sq_hdr_buf_map
;
298 dma_addr_t rq_hdr_buf_map
;
/* CQE geometry plus the validity test: a CQE is valid when its toggle
 * bit matches the phase implied by the raw consumer index.
 */
301 #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
303 #define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
304 #define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
305 #define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
306 #define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
308 #define ROCE_CQE_CMP_V 0
309 #define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
310 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
311 !((raw_cons) & (cp_bit)))
313 static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q
*qplib_q
)
315 return HWQ_CMP((qplib_q
->hwq
.prod
+ qplib_q
->q_full_delta
),
316 &qplib_q
->hwq
) == HWQ_CMP(qplib_q
->hwq
.cons
,
/* HW-independent completion record filled in by the CQ poll path; the
 * raweth_qp1_* members carry the raw-Ethernet/QP1 metadata from the CQE.
 * NOTE(review): leading members appear truncated in this extract.
 */
320 struct bnxt_qplib_cqe
{
335 u16 raweth_qp1_flags
;
336 u16 raweth_qp1_errors
;
337 u16 raweth_qp1_cfa_code
;
338 u32 raweth_qp1_flags2
;
339 u32 raweth_qp1_metadata
;
340 u8 raweth_qp1_payload_offset
;
344 #define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
/* Completion queue context: DPI handle, doorbell base, HW ring, resize
 * state, and a waitqueue used while a resize is in flight.
 * NOTE(review): several members appear truncated in this extract.
 */
345 struct bnxt_qplib_cq
{
346 struct bnxt_qplib_dpi
*dpi
;
347 void __iomem
*dbr_base
;
352 struct bnxt_qplib_hwq hwq
;
354 bool resize_in_progress
;
355 struct scatterlist
*sghead
;
359 #define CQ_RESIZE_WAIT_TIME_MS 500
361 #define CQ_FLAGS_RESIZE_IN_PROG 1
362 wait_queue_head_t waitq
;
/* IRRQ/ORRQ slot accounting: conversions between IRD/ORD limits and the
 * number of ring slots the HW needs for them.
 */
365 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
366 #define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
367 #define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * (x) + 2)
368 #define IRRQ_SLOTS_TO_IRD_LIMIT(s) (((s) >> 1) - 1)
369 #define ORD_LIMIT_TO_ORRQ_SLOTS(x) ((x) + 1)
370 #define ORRQ_SLOTS_TO_ORD_LIMIT(s) ((s) - 1)
/* NQ entry geometry and the phase-bit validity test for NQ entries. */
372 #define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE sizeof(struct nq_base)
374 #define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
375 #define NQE_MAX_IDX_PER_PG (NQE_CNT_PER_PG - 1)
376 #define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
377 #define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
379 #define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
380 (!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
381 !((raw_cons) & (cp_bit)))
383 #define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
/* NQ doorbell encoding built from the completion-doorbell fields.
 * NOTE(review): the continuation lines of NQ_DB_CP_FLAGS_REARM and
 * NQ_DB_CP_FLAGS appear truncated in this extract — confirm the full
 * flag composition against the complete header.
 */
385 #define NQ_CONS_PCI_BAR_REGION 2
386 #define NQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
387 #define NQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
388 #define NQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
389 #define NQ_DB_CP_FLAGS_REARM (NQ_DB_KEY_CP | \
391 #define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
/* Ring the NQ doorbell: REARM re-enables interrupts, plain DB does not. */
394 #define NQ_DB_REARM(db, raw_cons, cp_bit) \
395 writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
396 #define NQ_DB(db, raw_cons, cp_bit) \
397 writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
/* Notification queue context: PCI device, tasklet worker, HW ring, the
 * mapped doorbell BAR region, and callback pointers invoked for CQ and
 * SRQ notifications.
 * NOTE(review): the callback member names and several other members
 * appear truncated in this extract — confirm against the full header.
 */
399 struct bnxt_qplib_nq
{
400 struct pci_dev
*pdev
;
405 struct tasklet_struct worker
;
406 struct bnxt_qplib_hwq hwq
;
411 void __iomem
*bar_reg_iomem
;
414 (struct bnxt_qplib_nq
*nq
,
415 struct bnxt_qplib_cq
*cq
);
417 (struct bnxt_qplib_nq
*nq
,
/* NQ setup/teardown: enable registers the MSI-X vector, doorbell BAR
 * offset, and the CQ/SRQ notification callbacks; disable quiesces it.
 * NOTE(review): the tail of the bnxt_qplib_enable_nq prototype appears
 * truncated in this extract.
 */
422 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq
*nq
);
423 int bnxt_qplib_enable_nq(struct pci_dev
*pdev
, struct bnxt_qplib_nq
*nq
,
424 int msix_vector
, int bar_reg_offset
,
425 int (*cqn_handler
)(struct bnxt_qplib_nq
*nq
,
426 struct bnxt_qplib_cq
*cq
),
427 int (*srqn_handler
)(struct bnxt_qplib_nq
*nq
,
/* QP lifecycle API: create (QP1 and regular), modify, query, destroy.
 * All return 0 on success or a negative errno. The get_qp1_*_buf
 * helpers return the header-buffer virtual address for a QP1 WQE and
 * fill in the corresponding SGE.
 */
430 int bnxt_qplib_create_qp1(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
);
431 int bnxt_qplib_create_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
);
432 int bnxt_qplib_modify_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
);
433 int bnxt_qplib_query_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
);
434 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res
*res
, struct bnxt_qplib_qp
*qp
);
435 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp
*qp
,
436 struct bnxt_qplib_sge
*sge
);
437 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp
*qp
,
438 struct bnxt_qplib_sge
*sge
);
/* Posting and completion API: post_send/post_recv enqueue a WQE (the
 * *_db variants ring the doorbell), poll_cq drains up to @num CQEs into
 * @cqe, req_notify_cq arms the CQ, and alloc/free manage NQ memory.
 * NOTE(review): some prototypes (e.g. get_qp_buf_from_index) appear
 * truncated in this extract.
 */
439 u32
bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp
*qp
);
440 dma_addr_t
bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp
*qp
,
442 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp
*qp
);
443 int bnxt_qplib_post_send(struct bnxt_qplib_qp
*qp
,
444 struct bnxt_qplib_swqe
*wqe
);
445 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp
*qp
);
446 int bnxt_qplib_post_recv(struct bnxt_qplib_qp
*qp
,
447 struct bnxt_qplib_swqe
*wqe
);
448 int bnxt_qplib_create_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
);
449 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res
*res
, struct bnxt_qplib_cq
*cq
);
450 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq
*cq
, struct bnxt_qplib_cqe
*cqe
,
451 int num
, struct bnxt_qplib_qp
**qp
);
452 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq
*cq
, u32 arm_type
);
453 void bnxt_qplib_free_nq(struct bnxt_qplib_nq
*nq
);
454 int bnxt_qplib_alloc_nq(struct pci_dev
*pdev
, struct bnxt_qplib_nq
*nq
);
455 #endif /* __BNXT_QPLIB_FP_H__ */