/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_mbuf.h>

#include "packet_burst_generator.h"

#define UDP_SRC_PORT 1024
#define UDP_DST_PORT 1024

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

/*
 * Copy "len" bytes from "buf" into an mbuf chain, starting "offset" bytes
 * into the first segment and spilling into the following segments as needed.
 */
static void
copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
		unsigned offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned copy_len;

	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *) buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
		copy_len = seg->data_len; /* refresh for the new segment */
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}

static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
	/* Fast path: the data fits entirely in the first segment. */
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), buf,
				(size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}

void
initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
		struct ether_addr *dst_mac, uint16_t ether_type,
		uint8_t vlan_enabled, uint16_t vlan_id)
{
	ether_addr_copy(dst_mac, &eth_hdr->d_addr);
	ether_addr_copy(src_mac, &eth_hdr->s_addr);

	if (vlan_enabled) {
		struct vlan_hdr *vhdr = (struct vlan_hdr *)((uint8_t *)eth_hdr +
				sizeof(struct ether_hdr));

		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);

		vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
		vhdr->vlan_tci = vlan_id;
	} else {
		eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
	}
}

void
initialize_arp_header(struct arp_hdr *arp_hdr, struct ether_addr *src_mac,
		struct ether_addr *dst_mac, uint32_t src_ip, uint32_t dst_ip,
		uint32_t opcode)
{
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(uint32_t);
	arp_hdr->arp_op = rte_cpu_to_be_16(opcode);
	ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = src_ip;
	ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
	arp_hdr->arp_data.arp_tip = dst_ip;
}

uint16_t
initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
		uint16_t dst_port, uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));

	udp_hdr->src_port = rte_cpu_to_be_16(src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
	udp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	return pkt_len;
}


uint16_t
initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
		uint8_t *dst_addr, uint16_t pkt_data_len)
{
	ip_hdr->vtc_flow = 0;
	ip_hdr->payload_len = pkt_data_len;
	ip_hdr->proto = IPPROTO_UDP;
	ip_hdr->hop_limits = IP_DEFTTL;

	rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
	rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));

	return (uint16_t) (pkt_data_len + sizeof(struct ipv6_hdr));
}

uint16_t
initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
		uint32_t dst_addr, uint16_t pkt_data_len)
{
	uint16_t pkt_len;
	unaligned_uint16_t *ptr16;
	uint32_t ip_cksum;

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));

	ip_hdr->version_ihl = IP_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = rte_cpu_to_be_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(dst_addr);

	/*
	 * Compute IP header checksum: sum the header as 16-bit words,
	 * skipping ptr16[5], which is the checksum field itself.
	 */
	ptr16 = (unaligned_uint16_t *)ip_hdr;
	ip_cksum = 0;
	ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
	ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
	ip_cksum += ptr16[4];
	ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
	ip_cksum += ptr16[8]; ip_cksum += ptr16[9];

	/*
	 * Reduce the 32-bit sum to 16 bits, folding any carry back into the
	 * low word, and complement it.
	 */
	ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
		(ip_cksum & 0x0000FFFF);
	if (ip_cksum > 65535)
		ip_cksum -= 65535;
	ip_cksum = (~ip_cksum) & 0x0000FFFF;
	if (ip_cksum == 0)
		ip_cksum = 0xFFFF;
	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;

	return pkt_len;
}
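
/*
 * Worked example of the fold above (illustrative only, not used by the code):
 * if the 32-bit sum of the header words is 0x0001F0F3, the fold adds the high
 * half to the low half, 0x0001 + 0xF0F3 = 0xF0F4, wraps any carry out of bit
 * 16 back into the low word, and stores the one's complement,
 * ~0xF0F4 & 0xFFFF = 0x0F0B, in the checksum field.
 */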


/*
 * The maximum number of segments per packet is used when creating
 * scattered transmit packets composed of a list of mbufs.
 */
#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is an 8-bit unsigned char. */


int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
		struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
		uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
		uint8_t pkt_len, uint8_t nb_pkt_segs)
{
	int i, nb_pkt = 0;
	size_t eth_hdr_size;

	struct rte_mbuf *pkt_seg;
	struct rte_mbuf *pkt;

	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
		pkt = rte_pktmbuf_alloc(mp);
		if (pkt == NULL) {
nomore_mbuf:
			if (nb_pkt == 0)
				return -1;
			break;
		}

		pkt->data_len = pkt_len;
		pkt_seg = pkt;
		for (i = 1; i < nb_pkt_segs; i++) {
			pkt_seg->next = rte_pktmbuf_alloc(mp);
			if (pkt_seg->next == NULL) {
				pkt->nb_segs = i;
				rte_pktmbuf_free(pkt);
				goto nomore_mbuf;
			}
			pkt_seg = pkt_seg->next;
			pkt_seg->data_len = pkt_len;
		}
		pkt_seg->next = NULL; /* Last segment of packet. */

		/*
		 * Copy headers in first packet segment(s).
		 */
		if (vlan_enabled)
			eth_hdr_size = sizeof(struct ether_hdr) +
				sizeof(struct vlan_hdr);
		else
			eth_hdr_size = sizeof(struct ether_hdr);

		copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

		if (ipv4) {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt,
					eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv4_hdr));
		} else {
			copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt,
					eth_hdr_size);
			copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
					eth_hdr_size + sizeof(struct ipv6_hdr));
		}

		/*
		 * Complete first mbuf of packet and append it to the
		 * burst of packets to be transmitted.
		 */
		pkt->nb_segs = nb_pkt_segs;
		pkt->pkt_len = pkt_len;
		pkt->l2_len = eth_hdr_size;

		if (ipv4) {
			pkt->vlan_tci = ETHER_TYPE_IPv4;
			pkt->l3_len = sizeof(struct ipv4_hdr);
		} else {
			pkt->vlan_tci = ETHER_TYPE_IPv6;
			pkt->l3_len = sizeof(struct ipv6_hdr);
		}

		pkts_burst[nb_pkt] = pkt;
	}

	return nb_pkt;
}
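
/*
 * Usage sketch (illustrative only, not compiled as part of this file): roughly
 * how the helpers above combine to build a burst of UDP/IPv4 packets.  The
 * mempool "mp", the ether_addr variables "src_mac"/"dst_mac" and the
 * host-order IPv4 addresses "src_ip"/"dst_ip" are assumed to be set up by the
 * caller; the burst size of 32, the 18-byte payload and the 60-byte
 * single-segment length are arbitrary example values.
 *
 *	struct ether_hdr eth_hdr;
 *	struct ipv4_hdr ip_hdr;
 *	struct udp_hdr udp_hdr;
 *	struct rte_mbuf *burst[32];
 *	uint16_t udp_len;
 *	int nb;
 *
 *	initialize_eth_header(&eth_hdr, &src_mac, &dst_mac,
 *			ETHER_TYPE_IPv4, 0, 0);
 *	udp_len = initialize_udp_header(&udp_hdr, UDP_SRC_PORT, UDP_DST_PORT,
 *			18);
 *	initialize_ipv4_header(&ip_hdr, src_ip, dst_ip, udp_len);
 *	nb = generate_packet_burst(mp, burst, &eth_hdr, 0, &ip_hdr, 1,
 *			&udp_hdr, 32, 60, 1);
 *
 * On success, "nb" mbufs in "burst" carry Ethernet/IPv4/UDP headers and can be
 * handed to rte_eth_tx_burst() or fed to the unit under test.
 */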