]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/examples/l3fwd/l3fwd_lpm_neon.h
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / examples / l3fwd / l3fwd_lpm_neon.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation.
3 * Copyright(c) 2017-2018 Linaro Limited.
4 */
5
6 #ifndef __L3FWD_LPM_NEON_H__
7 #define __L3FWD_LPM_NEON_H__
8
9 #include <arm_neon.h>
10
11 #include "l3fwd_neon.h"
12
13 /*
14 * Read packet_type and destination IPV4 addresses from 4 mbufs.
15 */
/*
 * Gather the destination IPv4 address and packet_type from a group of
 * FWDSTEP mbufs.  The four addresses (still in network byte order) are
 * packed into one NEON vector; *ipv4_flag ends up non-zero only when
 * every packet in the group carries the RTE_PTYPE_L3_IPV4 bit.
 */
static inline void
processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
		int32x4_t *dip,
		uint32_t *ipv4_flag)
{
	int32_t dst[FWDSTEP];
	uint32_t flag = RTE_PTYPE_L3_IPV4;
	int i;

	for (i = 0; i < FWDSTEP; i++) {
		struct rte_ether_hdr *eth_hdr;
		struct rte_ipv4_hdr *ipv4_hdr;

		eth_hdr = rte_pktmbuf_mtod(pkt[i], struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		dst[i] = ipv4_hdr->dst_addr;
		/* AND accumulates: any non-IPv4 packet clears the flag. */
		flag &= pkt[i]->packet_type;
	}

	*ipv4_flag = flag;
	*dip = vld1q_s32(dst);
}
47
48 /*
49 * Lookup into LPM for destination port.
50 * If lookup fails, use incoming port (portid) as destination port.
51 */
/*
 * Lookup into LPM for destination port.
 * If lookup fails, use incoming port (portid) as destination port.
 *
 * dip holds the four destination addresses gathered by processx4_step1
 * (network byte order); ipv4_flag is non-zero iff all four packets are
 * IPv4.  Resolved ports are written to dprt[0..3].
 */
static inline void
processx4_step2(const struct lcore_conf *qconf,
		int32x4_t dip,
		uint32_t ipv4_flag,
		uint16_t portid,
		struct rte_mbuf *pkt[FWDSTEP],
		uint16_t dprt[FWDSTEP])
{
	rte_xmm_t dst;

	/* Reverse the bytes within each 32-bit lane, converting the four
	 * addresses from network to CPU byte order before the LPM lookup. */
	dip = vreinterpretq_s32_u8(vrev32q_u8(vreinterpretq_u8_s32(dip)));

	/* if all 4 packets are IPV4. */
	if (likely(ipv4_flag)) {
		/* Vectorized 4-way lookup; misses fall back to portid. */
		rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dst.u32,
			portid);
		/* get rid of unused upper 16 bit for each dport. */
		vst1_s16((int16_t *)dprt, vqmovn_s32(dst.x));
	} else {
		/* Mixed packet types: resolve each packet one by one via the
		 * scalar helper (handles the non-IPv4 cases per packet). */
		dst.x = dip;
		dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0],
			dst.u32[0], portid);
		dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1],
			dst.u32[1], portid);
		dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2],
			dst.u32[2], portid);
		dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3],
			dst.u32[3], portid);
	}
}
82
83 /*
84 * Buffer optimized handling of packets, invoked
85 * from main_loop.
86 */
/*
 * Buffer optimized handling of packets, invoked
 * from main_loop.
 *
 * Classifies nb_rx packets in groups of FWDSTEP (prefetching one group
 * ahead), handles the up-to-3 leftover packets individually, then hands
 * the whole burst to send_packets_multi().
 */
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
			uint16_t portid, struct lcore_conf *qconf)
{
	uint16_t dst_port[MAX_PKT_BURST];
	int32x4_t dip;
	uint32_t ipv4_flag;
	const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
	const int32_t m = nb_rx % FWDSTEP;
	int32_t i;
	int32_t j = 0;

	if (k) {
		/* Prime the pipeline: prefetch headers of the first group. */
		for (i = 0; i < FWDSTEP; i++)
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
					struct rte_ether_hdr *) + 1);

		/* All groups except the last: prefetch the following group
		 * while classifying the current one. */
		for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
			for (i = 0; i < FWDSTEP; i++)
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j + i + FWDSTEP],
						struct rte_ether_hdr *) + 1);

			processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
			processx4_step2(qconf, dip, ipv4_flag, portid,
					&pkts_burst[j], &dst_port[j]);
		}

		/* Last full group: nothing further ahead to prefetch. */
		processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
		processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
				&dst_port[j]);

		j += FWDSTEP;
	}

	if (m) {
		/* Prefetch last up to 3 packets one by one */
		for (i = 0; i < m; i++)
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + i],
					struct rte_ether_hdr *) + 1);

		/* Classify last up to 3 packets one by one */
		for (i = 0; i < m; i++)
			dst_port[j + i] = lpm_get_dst_port(qconf,
					pkts_burst[j + i], portid);
	}

	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
163
164 #endif /* __L3FWD_LPM_NEON_H__ */