/* Source: ceph/src/spdk/dpdk/examples/l3fwd/l3fwd_em_hlm.h
 * (mirrored at git.proxmox.com, ceph.git)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation.
3 * Copyright(c) 2017-2018 Linaro Limited.
6 #ifndef __L3FWD_EM_HLM_H__
7 #define __L3FWD_EM_HLM_H__
9 #if defined RTE_ARCH_X86
10 #include "l3fwd_sse.h"
11 #include "l3fwd_em_hlm_sse.h"
12 #elif defined RTE_MACHINE_CPUFLAG_NEON
13 #include "l3fwd_neon.h"
14 #include "l3fwd_em_hlm_neon.h"
18 #define EM_HASH_LOOKUP_COUNT 16
20 #define EM_HASH_LOOKUP_COUNT 8
24 static __rte_always_inline
void
25 em_get_dst_port_ipv4xN(struct lcore_conf
*qconf
, struct rte_mbuf
*m
[],
26 uint16_t portid
, uint16_t dst_port
[])
29 int32_t ret
[EM_HASH_LOOKUP_COUNT
];
30 union ipv4_5tuple_host key
[EM_HASH_LOOKUP_COUNT
];
31 const void *key_array
[EM_HASH_LOOKUP_COUNT
];
33 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++) {
34 get_ipv4_5tuple(m
[i
], mask0
.x
, &key
[i
]);
35 key_array
[i
] = &key
[i
];
38 rte_hash_lookup_bulk(qconf
->ipv4_lookup_struct
, &key_array
[0],
39 EM_HASH_LOOKUP_COUNT
, ret
);
41 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++) {
42 dst_port
[i
] = ((ret
[i
] < 0) ?
43 portid
: ipv4_l3fwd_out_if
[ret
[i
]]);
45 if (dst_port
[i
] >= RTE_MAX_ETHPORTS
||
46 (enabled_port_mask
& 1 << dst_port
[i
]) == 0)
51 static __rte_always_inline
void
52 em_get_dst_port_ipv6xN(struct lcore_conf
*qconf
, struct rte_mbuf
*m
[],
53 uint16_t portid
, uint16_t dst_port
[])
56 int32_t ret
[EM_HASH_LOOKUP_COUNT
];
57 union ipv6_5tuple_host key
[EM_HASH_LOOKUP_COUNT
];
58 const void *key_array
[EM_HASH_LOOKUP_COUNT
];
60 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++) {
61 get_ipv6_5tuple(m
[i
], mask1
.x
, mask2
.x
, &key
[i
]);
62 key_array
[i
] = &key
[i
];
65 rte_hash_lookup_bulk(qconf
->ipv6_lookup_struct
, &key_array
[0],
66 EM_HASH_LOOKUP_COUNT
, ret
);
68 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++) {
69 dst_port
[i
] = ((ret
[i
] < 0) ?
70 portid
: ipv6_l3fwd_out_if
[ret
[i
]]);
72 if (dst_port
[i
] >= RTE_MAX_ETHPORTS
||
73 (enabled_port_mask
& 1 << dst_port
[i
]) == 0)
78 static __rte_always_inline
uint16_t
79 em_get_dst_port(const struct lcore_conf
*qconf
, struct rte_mbuf
*pkt
,
83 struct ipv4_hdr
*ipv4_hdr
;
84 struct ipv6_hdr
*ipv6_hdr
;
88 tcp_or_udp
= pkt
->packet_type
& (RTE_PTYPE_L4_TCP
| RTE_PTYPE_L4_UDP
);
89 l3_ptypes
= pkt
->packet_type
& RTE_PTYPE_L3_MASK
;
91 if (tcp_or_udp
&& (l3_ptypes
== RTE_PTYPE_L3_IPV4
)) {
93 /* Handle IPv4 headers.*/
94 ipv4_hdr
= rte_pktmbuf_mtod_offset(pkt
, struct ipv4_hdr
*,
95 sizeof(struct ether_hdr
));
97 next_hop
= em_get_ipv4_dst_port(ipv4_hdr
, portid
,
98 qconf
->ipv4_lookup_struct
);
100 if (next_hop
>= RTE_MAX_ETHPORTS
||
101 (enabled_port_mask
& 1 << next_hop
) == 0)
106 } else if (tcp_or_udp
&& (l3_ptypes
== RTE_PTYPE_L3_IPV6
)) {
108 /* Handle IPv6 headers.*/
109 ipv6_hdr
= rte_pktmbuf_mtod_offset(pkt
, struct ipv6_hdr
*,
110 sizeof(struct ether_hdr
));
112 next_hop
= em_get_ipv6_dst_port(ipv6_hdr
, portid
,
113 qconf
->ipv6_lookup_struct
);
115 if (next_hop
>= RTE_MAX_ETHPORTS
||
116 (enabled_port_mask
& 1 << next_hop
) == 0)
127 * Buffer optimized handling of packets, invoked
131 l3fwd_em_send_packets(int nb_rx
, struct rte_mbuf
**pkts_burst
,
132 uint16_t portid
, struct lcore_conf
*qconf
)
135 uint16_t dst_port
[MAX_PKT_BURST
];
138 * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
139 * in groups of EM_HASH_LOOKUP_COUNT.
141 int32_t n
= RTE_ALIGN_FLOOR(nb_rx
, EM_HASH_LOOKUP_COUNT
);
143 for (j
= 0; j
< EM_HASH_LOOKUP_COUNT
&& j
< nb_rx
; j
++) {
144 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst
[j
],
145 struct ether_hdr
*) + 1);
148 for (j
= 0; j
< n
; j
+= EM_HASH_LOOKUP_COUNT
) {
150 uint32_t pkt_type
= RTE_PTYPE_L3_MASK
|
151 RTE_PTYPE_L4_TCP
| RTE_PTYPE_L4_UDP
;
152 uint32_t l3_type
, tcp_or_udp
;
154 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++)
155 pkt_type
&= pkts_burst
[j
+ i
]->packet_type
;
157 l3_type
= pkt_type
& RTE_PTYPE_L3_MASK
;
158 tcp_or_udp
= pkt_type
& (RTE_PTYPE_L4_TCP
| RTE_PTYPE_L4_UDP
);
160 for (i
= 0, pos
= j
+ EM_HASH_LOOKUP_COUNT
;
161 i
< EM_HASH_LOOKUP_COUNT
&& pos
< nb_rx
; i
++, pos
++) {
162 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst
[pos
],
163 struct ether_hdr
*) + 1);
166 if (tcp_or_udp
&& (l3_type
== RTE_PTYPE_L3_IPV4
)) {
168 em_get_dst_port_ipv4xN(qconf
, &pkts_burst
[j
], portid
,
171 } else if (tcp_or_udp
&& (l3_type
== RTE_PTYPE_L3_IPV6
)) {
173 em_get_dst_port_ipv6xN(qconf
, &pkts_burst
[j
], portid
,
177 for (i
= 0; i
< EM_HASH_LOOKUP_COUNT
; i
++)
178 dst_port
[j
+ i
] = em_get_dst_port(qconf
,
179 pkts_burst
[j
+ i
], portid
);
183 for (; j
< nb_rx
; j
++)
184 dst_port
[j
] = em_get_dst_port(qconf
, pkts_burst
[j
], portid
);
186 send_packets_multi(qconf
, pkts_burst
, dst_port
, nb_rx
);
189 #endif /* __L3FWD_EM_HLM_H__ */