/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_sched.h>

#include "main.h"
/*
 * QoS parameters are encoded as follows:
 *		Outer VLAN ID defines subport
 *		Inner VLAN ID defines pipe
 *		Destination IP 0.0.XXX.0 defines traffic class
 *		Destination IP host (0.0.0.XXX) defines queue
 * Values below define the offset of each field from the start of the frame,
 * counted in 16-bit words
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET	9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19
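
/*
 * Extract the scheduling hierarchy for one packet directly from the frame.
 * The VLAN IDs are read as big-endian 16-bit words; traffic class, queue
 * and color are taken from the destination IP bytes as loaded by the host
 * CPU. Each value is masked with "& (n - 1)", which only acts as a modulo
 * when the configured count is a power of two. For example, with large
 * enough power-of-two table sizes, outer VLAN 5 and inner VLAN 12 select
 * subport 5 and pipe 12.
 */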
static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);

	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
			(port_params.n_subports_per_port - 1); /* Outer VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
			(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
	*traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
			(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
	*queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
			(RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1); /* Destination IP */
	*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */

	return 0;
}
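
/*
 * RX thread: poll each configured (port, queue) pair in round-robin order,
 * classify every received packet into (subport, pipe, traffic class, queue,
 * color), stamp the result into the mbuf with rte_sched_port_pkt_write()
 * and hand the burst to the worker over rx_ring. If the ring is full, the
 * whole burst is dropped and counted.
 */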
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;

	uint32_t subport;
	uint32_t pipe;
	uint32_t traffic_class;
	uint32_t queue;
	uint32_t color;

	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class,
						&queue, &color);
				rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
						traffic_class, queue,
						(enum rte_meter_color) color);
			}

			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);

					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}

/*
 * Flush the burst buffered in qconf->m_table to the output interface.
 * At this point the packets cannot be dropped, so transmission is retried
 * until the NIC accepts the entire burst.
 */
static inline void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;

	do {
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs,
				(uint16_t)n);
		/* we cannot drop the packets, so re-send the remainder */
		/* update the number of packets still to be sent */
		n -= ret;
		mbufs = (struct rte_mbuf **)&mbufs[ret];
	} while (n);
}

/* Buffer packets for an output interface, flushing a full burst at a time */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs,
		uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;

		/* enough pkts to be sent */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}

	qconf->n_mbufs = len;
}
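
/*
 * TX thread: pull bursts of qos_dequeue packets from each tx_ring and pass
 * them to the NIC. A counter of consecutive empty reads is compared against
 * a threshold derived from the TSC frequency; once exceeded, any packets
 * still buffered in m_table are flushed so they do not linger when traffic
 * is light.
 */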
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int retval;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
				burst_conf.qos_dequeue, NULL);
		if (likely(retval != 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check if any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
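
/*
 * Worker thread: the QoS stage of the pipeline. Bursts are moved from
 * rx_ring into the hierarchical scheduler with rte_sched_port_enqueue()
 * (packets the scheduler refuses are counted as dropped), then dequeued
 * according to the scheduling policy and pushed to tx_ring, retrying until
 * the ring accepts the burst.
 */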
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
				burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port,
					mbufs, nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
				burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* retry until the ring accepts the burst */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
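
/*
 * Mixed thread: combines the worker and TX roles on a single lcore.
 * Packets flow from rx_ring through the scheduler as in
 * app_worker_thread(), but are transmitted directly rather than handed to
 * a separate TX thread, reusing the same empty-read counter and drain
 * logic as app_tx_thread().
 */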
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
				burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port,
					mbufs, nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
				burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);

			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check if any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}