/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "gro_tcp4.h"
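
/*
 * Allocate a TCP/IPv4 reassembly table on the given NUMA socket. The
 * table holds at most max_flow_num * max_item_per_flow packet items,
 * capped at GRO_TCP4_TBL_MAX_ITEM_NUM. Returns NULL on allocation
 * failure.
 */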
void *
gro_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}
	/* INVALID_ARRAY_INDEX indicates an empty flow. */
	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}
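
/*
 * Free a table created by gro_tcp4_tbl_create(), including its item
 * and flow arrays. A NULL pointer is handled safely.
 */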
void
gro_tcp4_tbl_destroy(void *tbl)
{
	struct gro_tcp4_tbl *tcp_tbl = tbl;

	if (tcp_tbl) {
		rte_free(tcp_tbl->items);
		rte_free(tcp_tbl->flows);
	}
	rte_free(tcp_tbl);
}
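
/*
 * Linearly scan the item array for a free slot (firstseg == NULL).
 * Returns its index, or INVALID_ARRAY_INDEX if the table is full.
 */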
static inline uint32_t
find_an_empty_item(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_item_num = tbl->max_item_num;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}
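
/*
 * Linearly scan the flow array for a free slot (start_index ==
 * INVALID_ARRAY_INDEX). Returns its index, or INVALID_ARRAY_INDEX if
 * all flow slots are in use.
 */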
static inline uint32_t
find_an_empty_flow(struct gro_tcp4_tbl *tbl)
{
	uint32_t i;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}
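
/*
 * Store an unmerged packet as a new item. If prev_idx is valid, the
 * new item is linked into the flow's item chain right after prev_idx.
 * Returns the new item's index, or INVALID_ARRAY_INDEX if no free
 * slot is available.
 */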
static inline uint32_t
insert_new_item(struct gro_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t ip_id,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (item_idx == INVALID_ARRAY_INDEX)
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].firstseg = pkt;
	tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].start_time = start_time;
	tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].sent_seq = sent_seq;
	tbl->items[item_idx].ip_id = ip_id;
	tbl->items[item_idx].nb_merged = 1;
	tbl->items[item_idx].is_atomic = is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain them together. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].next_pkt_idx =
			tbl->items[prev_idx].next_pkt_idx;
		tbl->items[prev_idx].next_pkt_idx = item_idx;
	}

	return item_idx;
}
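
/*
 * Release an item: mark it empty by NULL-ing firstseg, unlink it from
 * its predecessor if one is given, and return the index of the next
 * item in the chain.
 */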
static inline uint32_t
delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].next_pkt_idx = next_idx;

	return next_idx;
}
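
/*
 * Claim an empty flow slot, copy the flow key (MAC and IP addresses,
 * ports and ACK number) into it, and point it at the flow's first
 * item. Returns the flow index, or INVALID_ARRAY_INDEX if all flow
 * slots are in use.
 */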
static inline uint32_t
insert_new_flow(struct gro_tcp4_tbl *tbl,
		struct tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));
	ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));
	dst->ip_src_addr = src->ip_src_addr;
	dst->ip_dst_addr = src->ip_dst_addr;
	dst->recv_ack = src->recv_ack;
	dst->src_port = src->src_port;
	dst->dst_port = src->dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

/*
 * Update the IPv4 total length for the flushed packet.
 */
static inline void
update_header(struct gro_tcp4_item *item)
{
	struct ipv4_hdr *ipv4_hdr;
	struct rte_mbuf *pkt = item->firstseg;

	ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
			pkt->l2_len);
}
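
/*
 * Try to merge the input packet into an existing flow. Returns 1 if
 * the packet is merged into a stored one, 0 if it is inserted as a
 * new item (the table keeps the mbuf), and -1 if it cannot be
 * processed (TCP flags other than ACK, empty payload, or a full
 * table), in which case the caller should forward it unchanged.
 */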
int32_t
gro_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	uint16_t tcp_dl, ip_id, hdr_len, frag_off;
	uint8_t is_atomic;

	struct tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint8_t find;

	eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
	tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != TCP_ACK_FLAG)
		return -1;
	/* Don't process the packet whose payload length is 0. */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl == 0)
		return -1;

	/*
	 * Save the IPv4 ID for the packet whose DF bit is 0. For the
	 * packet whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & IPV4_HDR_DF_FLAG) == IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);
	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	ether_addr_copy(&(eth_hdr->s_addr), &(key.eth_saddr));
	ether_addr_copy(&(eth_hdr->d_addr), &(key.eth_daddr));
	key.ip_src_addr = ipv4_hdr->src_addr;
	key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.src_port = tcp_hdr->src_port;
	key.dst_port = tcp_hdr->dst_port;
	key.recv_ack = tcp_hdr->recv_ack;

	/* Search for a matching flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Failed to find a matching flow. Insert a new flow and store
	 * the packet into the flow.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, ip_id,
				is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so delete the
			 * stored packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/*
	 * Check all packets in the flow and try to find a neighbor for
	 * the input packet.
	 */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,
				is_atomic);
		if (cmp) {
			if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq, ip_id, 0))
				return 1;
			/*
			 * Failed to merge the two packets, as the merged
			 * packet length would exceed the max value. Store
			 * the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, ip_id, is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Failed to find a neighbor, so store the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				ip_id, is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}
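
/*
 * Flush packets whose start_time is not later than flush_timestamp.
 * Merged packets get their IPv4 header length updated before being
 * written to out[]. Returns the number of packets flushed, at most
 * nb_out.
 */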
uint16_t
gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].start_time <= flush_timestamp) {
				out[k++] = tbl->items[j].firstseg;
				if (tbl->items[j].nb_merged > 1)
					update_header(&(tbl->items[j]));
				/*
				 * Delete the packet and get the next
				 * packet in the flow.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in this flow
				 * won't time out. Go on to check other
				 * flows.
				 */
				break;
		}
	}
	return k;
}
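
/* Return the number of packets currently stored in the table. */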
uint32_t
gro_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}
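
/*
 * A minimal usage sketch, kept as a comment (illustrative only;
 * applications normally go through the rte_gro_* API rather than
 * calling these functions directly). The table sizes and the use of
 * rte_rdtsc() as both start and flush timestamps are assumptions:
 *
 *	struct gro_tcp4_tbl *tbl;
 *	struct rte_mbuf *flushed[32];
 *	uint16_t nb;
 *
 *	tbl = gro_tcp4_tbl_create(rte_socket_id(), 16, 4);
 *	// For each received TCP/IPv4 mbuf "pkt" with l2_len, l3_len
 *	// and l4_len set:
 *	//	gro_tcp4_reassemble(pkt, tbl, rte_rdtsc());
 *	// Later, flush everything whose timestamp has passed:
 *	nb = gro_tcp4_tbl_timeout_flush(tbl, rte_rdtsc(), flushed, 32);
 *	gro_tcp4_tbl_destroy(tbl);
 */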