/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #ifndef __OTX2_WORKER_H__
6 #define __OTX2_WORKER_H__
8 #include <rte_common.h>
9 #include <rte_branch_prediction.h>
11 #include <otx2_common.h>
12 #include "otx2_evdev.h"
13 #include "otx2_ethdev_sec_tx.h"
17 static __rte_always_inline
uint16_t
18 otx2_ssogws_get_work(struct otx2_ssogws
*ws
, struct rte_event
*ev
,
19 const uint32_t flags
, const void * const lookup_mem
)
21 union otx2_sso_event event
;
26 otx2_write64(BIT_ULL(16) | /* wait for work. */
27 1, /* Use Mask set 0. */
30 if (flags
& NIX_RX_OFFLOAD_PTYPE_F
)
31 rte_prefetch_non_temporal(lookup_mem
);
34 " ldr %[tag], [%[tag_loc]] \n"
35 " ldr %[wqp], [%[wqp_loc]] \n"
36 " tbz %[tag], 63, done%= \n"
39 " ldr %[tag], [%[tag_loc]] \n"
40 " ldr %[wqp], [%[wqp_loc]] \n"
41 " tbnz %[tag], 63, rty%= \n"
43 " prfm pldl1keep, [%[wqp], #8] \n"
44 " sub %[mbuf], %[wqp], #0x80 \n"
45 " prfm pldl1keep, [%[mbuf]] \n"
46 : [tag
] "=&r" (event
.get_work0
),
47 [wqp
] "=&r" (get_work1
),
49 : [tag_loc
] "r" (ws
->tag_op
),
50 [wqp_loc
] "r" (ws
->wqp_op
)
53 event
.get_work0
= otx2_read64(ws
->tag_op
);
54 while ((BIT_ULL(63)) & event
.get_work0
)
55 event
.get_work0
= otx2_read64(ws
->tag_op
);
57 get_work1
= otx2_read64(ws
->wqp_op
);
58 rte_prefetch0((const void *)get_work1
);
59 mbuf
= (uint64_t)((char *)get_work1
- sizeof(struct rte_mbuf
));
60 rte_prefetch0((const void *)mbuf
);
63 event
.get_work0
= (event
.get_work0
& (0x3ull
<< 32)) << 6 |
64 (event
.get_work0
& (0x3FFull
<< 36)) << 4 |
65 (event
.get_work0
& 0xffffffff);
66 ws
->cur_tt
= event
.sched_type
;
67 ws
->cur_grp
= event
.queue_id
;
69 if (event
.sched_type
!= SSO_TT_EMPTY
&&
70 event
.event_type
== RTE_EVENT_TYPE_ETHDEV
) {
71 otx2_wqe_to_mbuf(get_work1
, mbuf
, event
.sub_event_type
,
72 (uint32_t) event
.get_work0
, flags
, lookup_mem
);
73 /* Extracting tstamp, if PTP enabled*/
74 tstamp_ptr
= *(uint64_t *)(((struct nix_wqe_hdr_s
*)get_work1
)
75 + OTX2_SSO_WQE_SG_PTR
);
76 otx2_nix_mbuf_to_tstamp((struct rte_mbuf
*)mbuf
, ws
->tstamp
,
77 flags
, (uint64_t *)tstamp_ptr
);
81 ev
->event
= event
.get_work0
;
87 /* Used in cleaning up workslot. */
88 static __rte_always_inline
uint16_t
89 otx2_ssogws_get_work_empty(struct otx2_ssogws
*ws
, struct rte_event
*ev
,
92 union otx2_sso_event event
;
99 " ldr %[tag], [%[tag_loc]] \n"
100 " ldr %[wqp], [%[wqp_loc]] \n"
101 " tbz %[tag], 63, done%= \n"
104 " ldr %[tag], [%[tag_loc]] \n"
105 " ldr %[wqp], [%[wqp_loc]] \n"
106 " tbnz %[tag], 63, rty%= \n"
108 " prfm pldl1keep, [%[wqp], #8] \n"
109 " sub %[mbuf], %[wqp], #0x80 \n"
110 " prfm pldl1keep, [%[mbuf]] \n"
111 : [tag
] "=&r" (event
.get_work0
),
112 [wqp
] "=&r" (get_work1
),
114 : [tag_loc
] "r" (ws
->tag_op
),
115 [wqp_loc
] "r" (ws
->wqp_op
)
118 event
.get_work0
= otx2_read64(ws
->tag_op
);
119 while ((BIT_ULL(63)) & event
.get_work0
)
120 event
.get_work0
= otx2_read64(ws
->tag_op
);
122 get_work1
= otx2_read64(ws
->wqp_op
);
123 rte_prefetch_non_temporal((const void *)get_work1
);
124 mbuf
= (uint64_t)((char *)get_work1
- sizeof(struct rte_mbuf
));
125 rte_prefetch_non_temporal((const void *)mbuf
);
128 event
.get_work0
= (event
.get_work0
& (0x3ull
<< 32)) << 6 |
129 (event
.get_work0
& (0x3FFull
<< 36)) << 4 |
130 (event
.get_work0
& 0xffffffff);
131 ws
->cur_tt
= event
.sched_type
;
132 ws
->cur_grp
= event
.queue_id
;
134 if (event
.sched_type
!= SSO_TT_EMPTY
&&
135 event
.event_type
== RTE_EVENT_TYPE_ETHDEV
) {
136 otx2_wqe_to_mbuf(get_work1
, mbuf
, event
.sub_event_type
,
137 (uint32_t) event
.get_work0
, flags
, NULL
);
138 /* Extracting tstamp, if PTP enabled*/
139 tstamp_ptr
= *(uint64_t *)(((struct nix_wqe_hdr_s
*)get_work1
)
140 + OTX2_SSO_WQE_SG_PTR
);
141 otx2_nix_mbuf_to_tstamp((struct rte_mbuf
*)mbuf
, ws
->tstamp
,
142 flags
, (uint64_t *)tstamp_ptr
);
146 ev
->event
= event
.get_work0
;
152 static __rte_always_inline
void
153 otx2_ssogws_add_work(struct otx2_ssogws
*ws
, const uint64_t event_ptr
,
154 const uint32_t tag
, const uint8_t new_tt
,
159 add_work0
= tag
| ((uint64_t)(new_tt
) << 32);
160 otx2_store_pair(add_work0
, event_ptr
, ws
->grps_base
[grp
]);
163 static __rte_always_inline
void
164 otx2_ssogws_swtag_desched(struct otx2_ssogws
*ws
, uint32_t tag
, uint8_t new_tt
,
169 val
= tag
| ((uint64_t)(new_tt
& 0x3) << 32) | ((uint64_t)grp
<< 34);
170 otx2_write64(val
, ws
->swtag_desched_op
);
173 static __rte_always_inline
void
174 otx2_ssogws_swtag_norm(struct otx2_ssogws
*ws
, uint32_t tag
, uint8_t new_tt
)
178 val
= tag
| ((uint64_t)(new_tt
& 0x3) << 32);
179 otx2_write64(val
, ws
->swtag_norm_op
);
182 static __rte_always_inline
void
183 otx2_ssogws_swtag_untag(struct otx2_ssogws
*ws
)
185 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws
->getwrk_op
) +
186 SSOW_LF_GWS_OP_SWTAG_UNTAG
);
187 ws
->cur_tt
= SSO_SYNC_UNTAGGED
;
190 static __rte_always_inline
void
191 otx2_ssogws_swtag_flush(struct otx2_ssogws
*ws
)
193 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws
->getwrk_op
) +
194 SSOW_LF_GWS_OP_SWTAG_FLUSH
);
195 ws
->cur_tt
= SSO_SYNC_EMPTY
;
198 static __rte_always_inline
void
199 otx2_ssogws_desched(struct otx2_ssogws
*ws
)
201 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws
->getwrk_op
) +
202 SSOW_LF_GWS_OP_DESCHED
);
205 static __rte_always_inline
void
206 otx2_ssogws_swtag_wait(struct otx2_ssogws
*ws
)
208 #ifdef RTE_ARCH_ARM64
212 " ldr %[swtb], [%[swtp_loc]] \n"
213 " cbz %[swtb], done%= \n"
216 " ldr %[swtb], [%[swtp_loc]] \n"
217 " cbnz %[swtb], rty%= \n"
219 : [swtb
] "=&r" (swtp
)
220 : [swtp_loc
] "r" (ws
->swtp_op
)
223 /* Wait for the SWTAG/SWTAG_FULL operation */
224 while (otx2_read64(ws
->swtp_op
))
229 static __rte_always_inline
void
230 otx2_ssogws_head_wait(struct otx2_ssogws
*ws
)
232 #ifdef RTE_ARCH_ARM64
236 " ldr %[tag], [%[tag_op]] \n"
237 " tbnz %[tag], 35, done%= \n"
240 " ldr %[tag], [%[tag_op]] \n"
241 " tbz %[tag], 35, rty%= \n"
244 : [tag_op
] "r" (ws
->tag_op
)
247 /* Wait for the HEAD to be set */
248 while (!(otx2_read64(ws
->tag_op
) & BIT_ULL(35)))
253 static __rte_always_inline
void
254 otx2_ssogws_order(struct otx2_ssogws
*ws
, const uint8_t wait_flag
)
257 otx2_ssogws_head_wait(ws
);
262 static __rte_always_inline
const struct otx2_eth_txq
*
263 otx2_ssogws_xtract_meta(struct rte_mbuf
*m
)
265 return rte_eth_devices
[m
->port
].data
->tx_queues
[
266 rte_event_eth_tx_adapter_txq_get(m
)];
269 static __rte_always_inline
void
270 otx2_ssogws_prepare_pkt(const struct otx2_eth_txq
*txq
, struct rte_mbuf
*m
,
271 uint64_t *cmd
, const uint32_t flags
)
273 otx2_lmt_mov(cmd
, txq
->cmd
, otx2_nix_tx_ext_subs(flags
));
274 otx2_nix_xmit_prepare(m
, cmd
, flags
);
/*
 * Tx adapter fast path: transmit the mbuf carried in ev[0] on the Tx
 * queue resolved from the mbuf's port and Tx-adapter queue metadata.
 * Security-offload packets are diverted to the IPsec Tx handler; other
 * packets take the multi-segment or single-segment NIX transmit path.
 * NOTE(review): this chunk is a garbled extraction — interleaved lines
 * (braces, the else-branch opener and the function's tail/return) were
 * dropped, so the text below is preserved byte-for-byte, comments only.
 */
277 static __rte_always_inline
uint16_t
278 otx2_ssogws_event_tx(struct otx2_ssogws
*ws
, struct rte_event ev
[],
279 uint64_t *cmd
, const uint32_t flags
)
281 struct rte_mbuf
*m
= ev
[0].mbuf
;
282 const struct otx2_eth_txq
*txq
= otx2_ssogws_xtract_meta(m
);
284 rte_prefetch_non_temporal(txq
);
/* Security offload: hand the packet to the IPsec Tx handler instead. */
286 if ((flags
& NIX_TX_OFFLOAD_SECURITY_F
) &&
287 (m
->ol_flags
& PKT_TX_SEC_OFFLOAD
))
288 return otx2_sec_event_tx(ws
, ev
, m
, txq
, flags
);
290 /* Perform header writes before barrier for TSO */
291 otx2_nix_xmit_prepare_tso(m
, flags
);
/* Order against the scheduling chain, then build the Tx command. */
292 otx2_ssogws_order(ws
, !ev
->sched_type
);
293 otx2_ssogws_prepare_pkt(txq
, m
, cmd
, flags
);
/* Multi-segment vs single-segment transmit paths. */
295 if (flags
& NIX_TX_MULTI_SEG_F
) {
296 const uint16_t segdw
= otx2_nix_prepare_mseg(m
, cmd
, flags
);
297 otx2_nix_xmit_prepare_tstamp(cmd
, &txq
->cmd
[0],
298 m
->ol_flags
, segdw
, flags
);
299 otx2_nix_xmit_mseg_one(cmd
, txq
->lmt_addr
, txq
->io_addr
, segdw
);
301 /* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
302 otx2_nix_xmit_prepare_tstamp(cmd
, &txq
->cmd
[0],
303 m
->ol_flags
, 4, flags
);
304 otx2_nix_xmit_one(cmd
, txq
->lmt_addr
, txq
->io_addr
, flags
);