// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2020 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>

#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig() - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}

/**
 * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
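
/* Worked example (a sketch, not normative; it assumes the usual
 * compile-time values BATADV_FRAG_MAX_FRAG_SIZE = 1400,
 * BATADV_FRAG_MAX_FRAGMENTS = 16 and a 20 byte struct batadv_frag_packet):
 *
 *   limit = (1400 - 20) * 16 = 22080
 *
 * i.e. a full chain of 16 fragments can carry at most 22080 bytes of
 * payload.
 */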

/**
 * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and the caller can just insert the new
 * fragment without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}
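
/* Example (illustrative; assumes BATADV_FRAG_BUFFER_COUNT = 8 as in the
 * batman-adv headers): fragments with seqno 9 and seqno 17 both map to
 * bucket 9 % 8 == 17 % 8 == 1 in batadv_frag_insert_packet(). When a
 * seqno 17 fragment arrives while the bucket still buffers seqno 9
 * entries, this function drops the stale chain and reports an empty one.
 */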

/**
 * batadv_frag_insert_packet() - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}
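
/* Chain-order illustration (not part of the sources): fragments arriving
 * out of order as no=1, no=0, no=2 make the reverse ordered chain evolve as
 *
 *   [1]  ->  [1, 0]  ->  [2, 1, 0]
 *
 * Since batadv_frag_send_packet() eats fragments from the tail of the
 * original packet (fragment no 0 holds the last bytes), this
 * highest-to-lowest order is exactly the payload order that
 * batadv_frag_merge_packets() needs for a single-pass merge.
 */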

/**
 * batadv_frag_merge_packets() - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size) + hdr_size;

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull(skb_out, hdr_size);
	skb_out->ip_summed = CHECKSUM_NONE;
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the last skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);
	return skb_out;
}
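
/* Buffer transformation sketch for the merge destination (illustrative):
 *
 *   before:  [ mac header | frag header | payload of highest fragment ]
 *   after:   [ mac header | payload of highest fragment | other payloads ]
 *
 * skb_pull() skips past the now-useless fragment header and the memmove()
 * copies the MAC header forward so it again sits directly in front of the
 * merged data.
 */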

/**
 * batadv_frag_skb_buffer() - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when skb is not
 * used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	*skb = skb_out;
	return ret;
}
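
/* Caller sketch (hypothetical, for illustration only; the real caller
 * lives in routing.c):
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		return NET_RX_DROP;	// skb was already freed
 *	if (!skb)
 *		return NET_RX_SUCCESS;	// buffered, waiting for more fragments
 *	// skb now points at the fully merged packet
 */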

/**
 * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet will exceed the MTU towards the next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}

/**
 * batadv_frag_create() - create a fragment from skb
 * @net_dev: outgoing device for fragment
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @fragment_size: size of new fragment
 *
 * Split the passed skb into two fragments: a new one with size matching the
 * passed fragment_size and the old one with the rest. The new skb contains
 * data from the tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
					  struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int fragment_size)
{
	unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
	unsigned int tailroom = net_dev->needed_tailroom;
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int mtu = fragment_size + header_size;

	skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, ll_reserved + header_size);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
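
/* Layout sketch of the newly allocated fragment buffer (illustrative):
 *
 *   [ ll_reserved | header_size | fragment_size | tailroom ]
 *
 * skb_reserve() leaves room for the link-layer and fragment headers,
 * skb_split() copies the last fragment_size bytes of skb behind them,
 * and skb_push()/memcpy() then prepend the fragment header.
 */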

/**
 * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct net_device *net_dev = neigh_node->if_incoming->net_dev;
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;
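
	/* Worked example (a sketch; the numbers are illustrative, not from
	 * the sources): skb->len = 3000 and mtu = 1400 with a 20 byte
	 * fragment header give max_fragment_size = 1380, so
	 *
	 *   num_fragments     = (3000 - 1) / 1380 + 1 = 3
	 *   max_fragment_size = (3000 - 1) / 3 + 1    = 1000
	 *
	 * i.e. the payload is split evenly into three 1000 byte fragments
	 * instead of 1380 + 1380 + 240.
	 */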

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;
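
	/* Example (illustrative): skb->priority == 260 encodes 802.1d
	 * priority 260 - 256 = 4; any value outside 256..263 maps to 0.
	 */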
c0f25c80 499
8fdd0153
AQ
500 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
501 ether_addr_copy(frag_header.dest, orig_node->orig);
ee75ed88
MH
502
503 /* Eat and send fragments from the tail of skb */
504 while (skb->len > max_fragment_size) {
51c6b429
LL
505 /* The initial check in this function should cover this case */
506 if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
507 ret = -EINVAL;
508 goto put_primary_if;
509 }
510
c5cbfc87 511 skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
1c2bcc76 512 max_fragment_size);
8def0be8
SE
513 if (!skb_fragment) {
514 ret = -ENOMEM;
4ea33ef0 515 goto put_primary_if;
8def0be8 516 }
ee75ed88
MH
517
518 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
519 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
520 skb_fragment->len + ETH_HLEN);
f50ca95a
AQ
521 ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
522 if (ret != NET_XMIT_SUCCESS) {
8def0be8 523 ret = NET_XMIT_DROP;
4ea33ef0 524 goto put_primary_if;
f50ca95a
AQ
525 }
526
ee75ed88 527 frag_header.no++;
ee75ed88
MH
528 }
529
992b03b8
SE
530 /* make sure that there is at least enough head for the fragmentation
531 * and ethernet headers
532 */
533 ret = skb_cow_head(skb, ETH_HLEN + header_size);
534 if (ret < 0)
4ea33ef0 535 goto put_primary_if;
ee75ed88 536
992b03b8 537 skb_push(skb, header_size);
ee75ed88
MH
538 memcpy(skb->data, &frag_header, header_size);
539
540 /* Send the last fragment */
541 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
542 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
543 skb->len + ETH_HLEN);
f50ca95a 544 ret = batadv_send_unicast_skb(skb, neigh_node);
8def0be8
SE
545 /* skb was consumed */
546 skb = NULL;
ee75ed88 547
8def0be8
SE
548put_primary_if:
549 batadv_hardif_put(primary_if);
550free_skb:
551 kfree_skb(skb);
be181015
AQ
552
553 return ret;
ee75ed88 554}