/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the destination MAC address of the packet
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. If dst_addr is the broadcast address, the packet
 * is broadcast on that interface; otherwise it is sent as unicast.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const u8 *dst_addr)
{
        struct batadv_priv *bat_priv;
        struct ethhdr *ethhdr;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = eth_hdr(skb);
        ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
        ether_addr_copy(ethhdr->h_dest, dst_addr);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* Save a clone of the skb to use when decoding coded packets */
        batadv_nc_skb_store_for_decoding(bat_priv, skb);

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

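/**
 * batadv_send_broadcast_skb - send an skb to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */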
int batadv_send_broadcast_skb(struct sk_buff *skb,
                              struct batadv_hard_iface *hard_iface)
{
        return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

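/**
 * batadv_send_unicast_skb - send an skb to the given neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * When compiled with B.A.T.M.A.N. V support, a successful transmission also
 * updates the neighbor's last_unicast_tx timestamp.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise
 */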
int batadv_send_unicast_skb(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
        struct batadv_hardif_neigh_node *hardif_neigh;
#endif
        int ret;

        ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
        hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

        if ((hardif_neigh) && (ret != NET_XMIT_DROP))
                hardif_neigh->bat_v.last_unicast_tx = jiffies;

        if (hardif_neigh)
                batadv_hardif_neigh_put(hardif_neigh);
#endif

        return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
                            struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
        int ret = NET_XMIT_DROP;

        /* batadv_find_router() increases neigh_node's refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                goto out;

        /* Check if the skb is too large to send in one piece and fragment
         * it if needed.
         */
        if (atomic_read(&bat_priv->fragmentation) &&
            skb->len > neigh_node->if_incoming->net_dev->mtu) {
                /* Fragment and send packet. */
                if (batadv_frag_send_packet(skb, orig_node, neigh_node))
                        ret = NET_XMIT_SUCCESS;

                goto out;
        }

        /* try to network code the packet if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or
         * if network coding fails, then send the packet as usual.
         */
        if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
                ret = NET_XMIT_POLICED;
        } else {
                batadv_send_unicast_skb(skb, neigh_node);
                ret = NET_XMIT_SUCCESS;
        }

out:
        if (neigh_node)
                batadv_neigh_node_put(neigh_node);

        return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 * common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
                                  struct batadv_orig_node *orig_node)
{
        struct batadv_unicast_packet *unicast_packet;
        u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

        if (batadv_skb_head_push(skb, hdr_size) < 0)
                return false;

        unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_packet->version = BATADV_COMPAT_VERSION;
        /* batman packet type: unicast */
        unicast_packet->packet_type = BATADV_UNICAST;
        /* set unicast ttl */
        unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
        ether_addr_copy(unicast_packet->dest, orig_node->orig);
        /* set the destination tt version number */
        unicast_packet->ttvn = ttvn;

        return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
                                            struct batadv_orig_node *orig_node)
{
        size_t uni_size = sizeof(struct batadv_unicast_packet);

        return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 * unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
                                           struct batadv_orig_node *orig,
                                           int packet_subtype)
{
        struct batadv_hard_iface *primary_if;
        struct batadv_unicast_4addr_packet *uc_4addr_packet;
        bool ret = false;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* Push the header space and fill the unicast_packet substructure.
         * We can do that because the first member of the uc_4addr_packet
         * is of type struct unicast_packet
         */
        if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
                                               orig))
                goto out;

        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
        uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
        ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;

        ret = true;
out:
        if (primary_if)
                batadv_hardif_put(primary_if);
        return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 * 4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, int packet_type,
                            int packet_subtype,
                            struct batadv_orig_node *orig_node,
                            unsigned short vid)
{
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr;
        int ret = NET_XMIT_DROP;

        if (!orig_node)
                goto out;

        switch (packet_type) {
        case BATADV_UNICAST:
                if (!batadv_send_skb_prepare_unicast(skb, orig_node))
                        goto out;
                break;
        case BATADV_UNICAST_4ADDR:
                if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
                                                           orig_node,
                                                           packet_subtype))
                        goto out;
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
                 * should never be invoked with any other packet type
                 */
                goto out;
        }

        /* skb->data might have been reallocated by
         * batadv_send_skb_prepare_unicast{,_4addr}()
         */
        ethhdr = eth_hdr(skb);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;

        /* inform the destination node that we are still missing a correct route
         * for this client. The destination will receive this packet and will
         * try to reroute it because the ttvn contained in the header is less
         * than the current one
         */
        if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
                unicast_packet->ttvn = unicast_packet->ttvn - 1;

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_XMIT_SUCCESS;

out:
        if (orig_node)
                batadv_orig_node_put(orig_node);
        if (ret == NET_XMIT_DROP)
                kfree_skb(skb);
        return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 * 4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
                                   int packet_subtype, u8 *dst_hint,
                                   unsigned short vid)
{
        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
        struct batadv_orig_node *orig_node;
        u8 *src, *dst;

        src = ethhdr->h_source;
        dst = ethhdr->h_dest;

        /* if we got a hint, send the packet to this client (if any) */
        if (dst_hint) {
                src = NULL;
                dst = dst_hint;
        }
        orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

        return batadv_send_skb_unicast(bat_priv, skb, packet_type,
                                       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid)
{
        struct batadv_orig_node *orig_node;

        orig_node = batadv_gw_get_selected_orig(bat_priv);
        return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
                                       orig_node, vid);
}

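/**
 * batadv_schedule_bat_ogm - schedule an OGM on the given interface
 * @hard_iface: the interface to prepare the OGM for
 *
 * Marks an interface that is about to be activated as active and asks the
 * routing algorithm to schedule the next originator message on it.
 */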
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() (where the originator mac is set) and
         * outdated packets (especially with uninitialized mac addresses) that
         * are still in the packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

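/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the attached skb, releases the references held on the incoming and
 * outgoing interfaces and frees the forwarding packet itself.
 */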
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_put(forw_packet->if_incoming);
        if (forw_packet->if_outgoing)
                batadv_hardif_put(forw_packet->if_outgoing);
        kfree(forw_packet);
}

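/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet and arm its timer
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the broadcast packet to queue
 * @send_time: number of jiffies to wait before (re)sending the packet
 */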
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;
        forw_packet->if_outgoing = NULL;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_put(primary_if);
        return NETDEV_TX_BUSY;
}

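/**
 * batadv_send_outstanding_bcast_packet - (re)send a queued broadcast packet
 * @work: the work item embedded in the forwarding packet
 *
 * Sends a copy of the queued broadcast packet on every hard interface
 * attached to the soft interface and re-queues the packet until it has been
 * transmitted BATADV_NUM_BCASTS_MAX times.
 */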
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                if (forw_packet->num_packets >= hard_iface->num_bcasts)
                        continue;

                if (!kref_get_unless_zero(&hard_iface->refcount))
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_broadcast_skb(skb1, hard_iface);

                batadv_hardif_put(hard_iface);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

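/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: the work item embedded in the forwarding packet
 *
 * Removes the forwarding packet from the OGM queue and hands it to the
 * routing algorithm for transmission. If the packet is the node's own and
 * was queued for its originating interface, the next OGM is scheduled.
 */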
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = to_delayed_work(work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue to determine the
         * queue's wake-up time unless we are shutting down.
         *
         * only re-schedule if this is the "original" copy, e.g. the OGM of the
         * primary interface should only be rescheduled once per period, but
         * this function will be called for the forw_packet instances of the
         * other secondary interfaces as well.
         */
        if (forw_packet->own &&
            forw_packet->if_incoming == forw_packet->if_outgoing)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

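/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge packets for, or NULL for all interfaces
 *
 * Cancels the delayed work of every queued broadcast and OGM packet that
 * belongs to the given interface (or of all queued packets if hard_iface is
 * NULL) and frees the packets whose transmission was still pending.
 */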
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface) &&
                    (forw_packet->if_outgoing != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->bcast_queue_left);

                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface) &&
                    (forw_packet->if_outgoing != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        if (!forw_packet->own)
                                atomic_inc(&bat_priv->batman_queue_left);

                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}