/* Extracted from: ceph/src/dpdk/lib/librte_ip_frag/rte_ipv4_reassembly.c
 * (DPDK bundled with Ceph; original viewed via git.proxmox.com gitweb) */
4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_debug.h>
38 #include "ip_frag_common.h"
41 * Reassemble fragments into one packet.
44 ipv4_frag_reassemble(struct ip_frag_pkt
*fp
)
46 struct ipv4_hdr
*ip_hdr
;
47 struct rte_mbuf
*m
, *prev
;
48 uint32_t i
, n
, ofs
, first_len
;
49 uint32_t curr_idx
= 0;
51 first_len
= fp
->frags
[IP_FIRST_FRAG_IDX
].len
;
54 /*start from the last fragment. */
55 m
= fp
->frags
[IP_LAST_FRAG_IDX
].mb
;
56 ofs
= fp
->frags
[IP_LAST_FRAG_IDX
].ofs
;
57 curr_idx
= IP_LAST_FRAG_IDX
;
59 while (ofs
!= first_len
) {
63 for (i
= n
; i
!= IP_FIRST_FRAG_IDX
&& ofs
!= first_len
; i
--) {
65 /* previous fragment found. */
66 if(fp
->frags
[i
].ofs
+ fp
->frags
[i
].len
== ofs
) {
68 /* adjust start of the last fragment data. */
69 rte_pktmbuf_adj(m
, (uint16_t)(m
->l2_len
+ m
->l3_len
));
70 rte_pktmbuf_chain(fp
->frags
[i
].mb
, m
);
72 /* this mbuf should not be accessed directly */
73 fp
->frags
[curr_idx
].mb
= NULL
;
76 /* update our last fragment and offset. */
78 ofs
= fp
->frags
[i
].ofs
;
82 /* error - hole in the packet. */
88 /* chain with the first fragment. */
89 rte_pktmbuf_adj(m
, (uint16_t)(m
->l2_len
+ m
->l3_len
));
90 rte_pktmbuf_chain(fp
->frags
[IP_FIRST_FRAG_IDX
].mb
, m
);
91 m
= fp
->frags
[IP_FIRST_FRAG_IDX
].mb
;
93 /* update mbuf fields for reassembled packet. */
94 m
->ol_flags
|= PKT_TX_IP_CKSUM
;
96 /* update ipv4 header for the reassmebled packet */
97 ip_hdr
= rte_pktmbuf_mtod_offset(m
, struct ipv4_hdr
*, m
->l2_len
);
99 ip_hdr
->total_length
= rte_cpu_to_be_16((uint16_t)(fp
->total_size
+
101 ip_hdr
->fragment_offset
= (uint16_t)(ip_hdr
->fragment_offset
&
102 rte_cpu_to_be_16(IPV4_HDR_DF_FLAG
));
103 ip_hdr
->hdr_checksum
= 0;
109 * Process new mbuf with fragment of IPV4 packet.
110 * Incoming mbuf should have it's l2_len/l3_len fields setuped correclty.
112 * Table where to lookup/add the fragmented packet.
114 * Incoming mbuf with IPV4 fragment.
116 * Fragment arrival timestamp.
118 * Pointer to the IPV4 header inside the fragment.
120 * Pointer to mbuf for reassebled packet, or NULL if:
121 * - an error occured.
122 * - not all fragments of the packet are collected yet.
125 rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl
*tbl
,
126 struct rte_ip_frag_death_row
*dr
, struct rte_mbuf
*mb
, uint64_t tms
,
127 struct ipv4_hdr
*ip_hdr
)
129 struct ip_frag_pkt
*fp
;
130 struct ip_frag_key key
;
131 const unaligned_uint64_t
*psd
;
133 uint16_t flag_offset
, ip_ofs
, ip_flag
;
135 flag_offset
= rte_be_to_cpu_16(ip_hdr
->fragment_offset
);
136 ip_ofs
= (uint16_t)(flag_offset
& IPV4_HDR_OFFSET_MASK
);
137 ip_flag
= (uint16_t)(flag_offset
& IPV4_HDR_MF_FLAG
);
139 psd
= (unaligned_uint64_t
*)&ip_hdr
->src_addr
;
140 /* use first 8 bytes only */
141 key
.src_dst
[0] = psd
[0];
142 key
.id
= ip_hdr
->packet_id
;
143 key
.key_len
= IPV4_KEYLEN
;
145 ip_ofs
*= IPV4_HDR_OFFSET_UNITS
;
146 ip_len
= (uint16_t)(rte_be_to_cpu_16(ip_hdr
->total_length
) -
149 IP_FRAG_LOG(DEBUG
, "%s:%d:\n"
150 "mbuf: %p, tms: %" PRIu64
151 ", key: <%" PRIx64
", %#x>, ofs: %u, len: %u, flags: %#x\n"
152 "tbl: %p, max_cycles: %" PRIu64
", entry_mask: %#x, "
153 "max_entries: %u, use_entries: %u\n\n",
155 mb
, tms
, key
.src_dst
[0], key
.id
, ip_ofs
, ip_len
, ip_flag
,
156 tbl
, tbl
->max_cycles
, tbl
->entry_mask
, tbl
->max_entries
,
159 /* try to find/add entry into the fragment's table. */
160 if ((fp
= ip_frag_find(tbl
, dr
, &key
, tms
)) == NULL
) {
161 IP_FRAG_MBUF2DR(dr
, mb
);
165 IP_FRAG_LOG(DEBUG
, "%s:%d:\n"
166 "tbl: %p, max_entries: %u, use_entries: %u\n"
167 "ipv4_frag_pkt: %p, key: <%" PRIx64
", %#x>, start: %" PRIu64
168 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
170 tbl
, tbl
->max_entries
, tbl
->use_entries
,
171 fp
, fp
->key
.src_dst
[0], fp
->key
.id
, fp
->start
,
172 fp
->total_size
, fp
->frag_size
, fp
->last_idx
);
175 /* process the fragmented packet. */
176 mb
= ip_frag_process(fp
, dr
, mb
, ip_ofs
, ip_len
, ip_flag
);
177 ip_frag_inuse(tbl
, fp
);
179 IP_FRAG_LOG(DEBUG
, "%s:%d:\n"
181 "tbl: %p, max_entries: %u, use_entries: %u\n"
182 "ipv4_frag_pkt: %p, key: <%" PRIx64
", %#x>, start: %" PRIu64
183 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
184 __func__
, __LINE__
, mb
,
185 tbl
, tbl
->max_entries
, tbl
->use_entries
,
186 fp
, fp
->key
.src_dst
[0], fp
->key
.id
, fp
->start
,
187 fp
->total_size
, fp
->frag_size
, fp
->last_idx
);