/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2020 NXP
 */
#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_

#include <rte_eventdev.h>
#include <dpaax_iova_table.h>

#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
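/*
 * upper_32_bits() shifts in two 16-bit steps so the expression stays well
 * defined even when (x) is only 32 bits wide (a single ">> 32" on a 32-bit
 * operand is undefined behaviour in C).
 */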
#define VLAN_TAG_SIZE 4 /**< VLAN header length */
/* Maximum number of slots available in TX ring */
#define MAX_TX_RING_SLOTS 32
#define MAX_EQ_RESP_ENTRIES (MAX_TX_RING_SLOTS + 1)
/* Maximum number of slots available in the enqueue command ring (EQCR) */
#define DPAA2_EQCR_RING_SIZE 8
/* Maximum number of slots available in the EQCR on LX2 */
#define DPAA2_LX2_EQCR_RING_SIZE 32

/* Maximum number of slots available in the dequeue response ring (DQRR) */
#define DPAA2_DQRR_RING_SIZE 16
/* Maximum number of slots available in the DQRR on LX2 */
#define DPAA2_LX2_DQRR_RING_SIZE 32

/* EQCR shift to get the EQCR size: (1 << 3) = 8 for LS2 */
#define DPAA2_EQCR_SHIFT 3
/* EQCR shift to get the EQCR size for LX2: (1 << 5) = 32 */
#define DPAA2_LX2_EQCR_SHIFT 5
/* Flag to determine an ordered queue mbuf */
#define DPAA2_ENQUEUE_FLAG_ORP (1ULL << 30)
/* ORP ID shift and mask */
#define DPAA2_EQCR_OPRID_SHIFT 16
#define DPAA2_EQCR_OPRID_MASK 0x3FFF0000
/* Sequence number shift and mask */
#define DPAA2_EQCR_SEQNUM_SHIFT 0
#define DPAA2_EQCR_SEQNUM_MASK 0x0000FFFF
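/*
 * Illustrative sketch (not part of this header): an order-restoration word
 * for an enqueue descriptor can be composed from an ORP ID and a sequence
 * number with the shift/mask pairs above, e.g.
 *
 *	uint32_t orp_word =
 *		((orpid << DPAA2_EQCR_OPRID_SHIFT) & DPAA2_EQCR_OPRID_MASK) |
 *		((seqnum << DPAA2_EQCR_SEQNUM_SHIFT) & DPAA2_EQCR_SEQNUM_MASK);
 */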
#define DPAA2_SWP_CENA_REGION 0
#define DPAA2_SWP_CINH_REGION 1
#define DPAA2_SWP_CENA_MEM_REGION 2

#define DPAA2_MAX_TX_RETRY_COUNT 10000

#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
#define NUM_DQS_PER_QUEUE 2

/* Maximum release/acquire from QBMAN */
#define DPAA2_MBUF_MAX_ACQ_REL 7
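/*
 * Illustrative sketch (not part of this header): since QBMAN acquire/release
 * commands work on at most DPAA2_MBUF_MAX_ACQ_REL buffers at a time, callers
 * typically split a larger burst into chunks, e.g.
 *
 *	for (i = 0; i < n; i += DPAA2_MBUF_MAX_ACQ_REL) {
 *		unsigned int chunk = RTE_MIN(n - i,
 *				(unsigned int)DPAA2_MBUF_MAX_ACQ_REL);
 *		acquire or release 'chunk' buffers via the QBMAN portal
 *	}
 */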
#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"

#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0

/* We re-use the HEADROOM for annotation in RX */
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /* changed from 256 */

#define DPAA2_DPCI_MAX_QUEUES 2
struct eqresp_metadata {
	struct dpaa2_queue *dpaa2_q;
	struct rte_mempool *mp;
};
struct dpaa2_dpio_dev {
	TAILQ_ENTRY(dpaa2_dpio_dev) next;
		/**< Pointer to next device instance */
	uint16_t index; /**< Index of an instance in the list */
	rte_atomic16_t ref_count;
		/**< How many thread contexts are sharing this. */
	struct qbman_result *eqresp;
	struct eqresp_metadata *eqresp_meta;
	struct fsl_mc_io *dpio; /**< Handle to DPIO portal object */
	struct qbman_swp *sw_portal; /**< SW portal object */
	const struct qbman_result *dqrr[4];
		/**< DQRR entries for this SW portal */
	void *mc_portal; /**< MC portal for configuring this device */
	uintptr_t qbman_portal_ce_paddr;
		/**< Physical address of cache-enabled area */
	uintptr_t ce_size; /**< Size of the CE region */
	uintptr_t qbman_portal_ci_paddr;
		/**< Physical address of cache-inhibited area */
	uintptr_t ci_size; /**< Size of the CI region */
	struct rte_intr_handle intr_handle; /* Interrupt related info */
	int32_t epoll_fd; /**< File descriptor created for interrupt polling */
	int32_t hw_id; /**< A unique ID of this DPIO device instance */
};
struct dpaa2_dpbp_dev {
	TAILQ_ENTRY(dpaa2_dpbp_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpbp; /**< Handle to DPBP portal object */
	rte_atomic16_t in_use;
	uint32_t dpbp_id; /**< HW ID for DPBP object */
};
struct queue_storage_info_t {
	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
	struct qbman_result *active_dqs;
	uint8_t active_dpio_id;
	uint8_t last_num_pkts;
};
typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
		const struct qbman_fd *fd,
		const struct qbman_result *dq,
		struct dpaa2_queue *rxq,
		struct rte_event *ev);

typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci);
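/*
 * Illustrative sketch (hypothetical function, not part of this header): a
 * DQRR callback matching dpaa2_queue_cb_dqrr_t might simply publish the
 * dequeued frame's buffer address through the event, e.g.
 *
 *	static void example_cb_dqrr(struct qbman_swp *swp,
 *				    const struct qbman_fd *fd,
 *				    const struct qbman_result *dq,
 *				    struct dpaa2_queue *rxq,
 *				    struct rte_event *ev)
 *	{
 *		(void)swp; (void)dq; (void)rxq;
 *		ev->event_ptr = (void *)(size_t)DPAA2_GET_FD_ADDR(fd);
 *	}
 */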
struct dpaa2_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	struct rte_eth_dev_data *eth_data;
	struct rte_cryptodev_data *crypto_data;
	uint32_t fqid;		/*!< Unique ID of this queue */
	uint16_t flow_id;	/*!< To be used by the DPAA2 framework */
	uint8_t tc_index;	/*!< Traffic class identifier */
	uint8_t cgid;		/*!< Congestion group ID for this queue */
	struct queue_storage_info_t *q_storage;
	struct qbman_result *cscn;
	int32_t eventfd;	/*!< Event fd of this queue */
	dpaa2_queue_cb_dqrr_t *cb;
	dpaa2_queue_cb_eqresp_free_t *cb_eqresp_free;
	struct dpaa2_bp_info *bp_array;
	/* To store the tx_conf_queue corresponding to the tx_queue */
	struct dpaa2_queue *tx_conf_queue;
};
struct swp_active_dqs {
	struct qbman_result *global_active_dqs;
	uint64_t reserved[7];
};

#define NUM_MAX_SWP 64

extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
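/*
 * Each entry is one pointer plus 56 reserved bytes, i.e. 64 bytes in total,
 * which appears intended to keep per-portal entries on separate cache lines
 * and avoid false sharing between lcores polling different portals.
 */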
struct dpaa2_dpci_dev {
	TAILQ_ENTRY(dpaa2_dpci_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpci; /**< Handle to DPCI portal object */
	rte_atomic16_t in_use;
	uint32_t dpci_id; /**< HW ID for DPCI object */
	struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
	struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};
struct dpaa2_dpcon_dev {
	TAILQ_ENTRY(dpaa2_dpcon_dev) next;
	struct fsl_mc_io dpcon;
	rte_atomic16_t in_use;
	uint16_t qbman_ch_id;
	uint8_t num_priorities;
	uint8_t channel_index;
};
/* Refer to Table 7-3 in SEC BG */
struct qbman_fle {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	/* FMT must be 00, MSB is final bit */
	uint32_t fin_bpid_offset;
	uint32_t frc;
	uint32_t reserved[3]; /* Not used currently */
};
struct qbman_sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	uint32_t fin_bpid_offset;
};
/* There are three types of frames: single, scatter-gather and frame lists */
enum qbman_fd_format {
	qbman_fd_single = 0,
	qbman_fd_list,
	qbman_fd_sg
};
/* Macros to define operations on FD */
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
	(fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
	(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
	((fd)->simple.bpid_offset = bpid)
#define DPAA2_SET_FD_IVP(fd) (((fd)->simple.bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_OFFSET(fd, offset) \
	(((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16))
#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
	((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
	((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
#define DPAA2_RESET_FD_FRC(fd) ((fd)->simple.frc = 0)
#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)

#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
#define DPAA2_RESET_FD_FLC(fd) do { \
	(fd)->simple.flc_lo = 0; \
	(fd)->simple.flc_hi = 0; \
} while (0)

#define DPAA2_SET_FD_FLC(fd, addr) do { \
	(fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
	(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
	(size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
	(fle)->addr_lo = lower_32_bits((size_t)addr); \
	(fle)->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
	((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
	(fle)->reserved[0] = lower_32_bits((size_t)addr); \
	(fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint32_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
	((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
	(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))

#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
#define DPAA2_GET_FD_FLC(fd) \
	(((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
	(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)

#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
	((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))

#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
#define DPAA2_FD_SET_FORMAT(fd, format) do { \
	(fd)->simple.bpid_offset &= 0xCFFFFFFF; \
	(fd)->simple.bpid_offset |= (uint32_t)format << 28; \
} while (0)
#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)

#define DPAA2_SG_SET_FINAL(sg, fin) do { \
	(sg)->fin_bpid_offset &= 0x7FFFFFFF; \
	(sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
} while (0)
#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
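/*
 * Illustrative sketch (hypothetical helper, not part of this header): a TX
 * path would typically fill a single-buffer frame descriptor from an mbuf
 * with the accessor macros above, roughly as follows (bpid is assumed to be
 * the buffer pool ID the mbuf was allocated from):
 *
 *	static inline void example_mbuf_to_fd(struct rte_mbuf *m,
 *					      struct qbman_fd *fd,
 *					      uint16_t bpid)
 *	{
 *		DPAA2_RESET_FD_CTRL(fd);
 *		DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
 *		DPAA2_SET_FD_LEN(fd, m->data_len);
 *		DPAA2_SET_ONLY_FD_BPID(fd, bpid);
 *		DPAA2_SET_FD_OFFSET(fd, m->data_off);
 *		DPAA2_FD_SET_FORMAT(fd, qbman_fd_single);
 *	}
 */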
/* Only Enqueue Error responses will be
 * pushed on FQID_ERR of the Enqueue FQ
 */
#define DPAA2_EQ_RESP_ERR_FQ 0
/* All Enqueue responses will be pushed on the address
 * set with qbman_eq_desc_set_response
 */
#define DPAA2_EQ_RESP_ALWAYS 1
/* Various structures representing contiguous memory maps */
struct dpaa2_memseg {
	TAILQ_ENTRY(dpaa2_memseg) next;
	char *vaddr;
	rte_iova_t iova;
	size_t len;
};
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __rte_unused;

static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
	void *va;

	if (dpaa2_virt_mode)
		return (void *)(size_t)paddr;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* If not, fall back to full memseg list searching */
	va = rte_mem_iova2virt(paddr);

	return va;
}
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __rte_unused;

static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
	const struct rte_memseg *memseg;

	if (dpaa2_virt_mode)
		return vaddr;

	memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
	if (memseg)
		return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
	return (size_t)NULL;
}
/*
 * When we are using physical addresses as IO virtual addresses, we need to
 * call the conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov wherever
 * required. These routines are called with the help of the macros below.
 */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)

/* Macro to convert a virtual address to IOVA */
#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))

/* Macro to convert an IOVA to a virtual address */
#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))

/* Macro to modify, in place, memory containing an IOVA into the
 * corresponding virtual address
 */
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
	{_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
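/*
 * Illustrative usage (sketch, names are hypothetical): a descriptor field
 * that carries an IOVA can be rewritten in place after dequeue so the rest
 * of the code treats it as a normal pointer; in this build the macro calls
 * dpaa2_mem_ptov(), in the virtual-IOVA build below it is a no-op.
 *
 *	void *ctx = (void *)(size_t)DPAA2_GET_FD_FLC(fd);
 *	DPAA2_MODIFY_IOVA_TO_VADDR(ctx, void *);
 */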
#else	/* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (phys_addr_t)(_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (void *)(_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)

#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
static inline
int check_swp_active_dqs(uint16_t dpio_index)
{
	if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
		return 1;
	return 0;
}

static inline
void clear_swp_active_dqs(uint16_t dpio_index)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
}

static inline
struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
{
	return rte_global_active_dqs_list[dpio_index].global_active_dqs;
}

static inline
void set_swp_active_dqs(uint16_t dpio_index, struct qbman_result *dqs)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
}
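/*
 * Illustrative usage (sketch, not part of this header): before handing a new
 * dq storage to a portal the datapath usually checks whether the previously
 * registered storage has been consumed, and records the new one afterwards:
 *
 *	if (check_swp_active_dqs(dpio_index))
 *		wait for or drain get_swp_active_dqs(dpio_index);
 *	set_swp_active_dqs(dpio_index, q_storage->dq_storage[0]);
 *	... once QBMAN has finished writing into that storage ...
 *	clear_swp_active_dqs(dpio_index);
 */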
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);

void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);

int dpaa2_dpbp_supported(void);

struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);

void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);

/* Global MCP pointer */
void *dpaa2_get_mcp_ptr(int portal_idx);

#endif /* _DPAA2_HW_PVT_H_ */