]>
Commit | Line | Data |
---|---|---|
c6c8fea2 | 1 | /* |
64afe353 | 2 | * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors: |
c6c8fea2 SE |
3 | * |
4 | * Marek Lindner, Simon Wunderlich | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of version 2 of the GNU General Public | |
8 | * License as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
18 | * 02110-1301, USA | |
19 | * | |
20 | */ | |
21 | ||
22 | #include "main.h" | |
23 | #include "hard-interface.h" | |
24 | #include "soft-interface.h" | |
25 | #include "send.h" | |
26 | #include "translation-table.h" | |
27 | #include "routing.h" | |
28 | #include "bat_sysfs.h" | |
29 | #include "originator.h" | |
30 | #include "hash.h" | |
31 | ||
32 | #include <linux/if_arp.h> | |
33 | ||
/* protect update critical side of hardif_list - but not the content */
static DEFINE_SPINLOCK(hardif_list_lock);

/* forward declaration: per-interface packet_type handler registered in
 * hardif_enable_interface() via dev_add_pack() */
static int batman_skb_recv(struct sk_buff *skb,
			   struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);
42 | ||
ed75ccbe | 43 | void hardif_free_rcu(struct rcu_head *rcu) |
c6c8fea2 SE |
44 | { |
45 | struct batman_if *batman_if; | |
46 | ||
47 | batman_if = container_of(rcu, struct batman_if, rcu); | |
48 | dev_put(batman_if->net_dev); | |
ed75ccbe | 49 | kfree(batman_if); |
c6c8fea2 SE |
50 | } |
51 | ||
/**
 * get_batman_if_by_netdev - look up the batman_if wrapping a net_device
 * @net_dev: the net_device to search for
 *
 * Walks hardif_list under RCU. On a match, atomically takes a reference
 * (only if the refcount is still non-zero, i.e. the entry is not being
 * torn down). Returns the referenced batman_if, or NULL if not found.
 * The caller must release the reference with hardif_free_ref().
 */
struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->net_dev == net_dev &&
		    atomic_inc_not_zero(&batman_if->refcount))
			goto out;
	}

	/* no match (or entry already dying) */
	batman_if = NULL;

out:
	rcu_read_unlock();
	return batman_if;
}
69 | ||
70 | static int is_valid_iface(struct net_device *net_dev) | |
71 | { | |
72 | if (net_dev->flags & IFF_LOOPBACK) | |
73 | return 0; | |
74 | ||
75 | if (net_dev->type != ARPHRD_ETHER) | |
76 | return 0; | |
77 | ||
78 | if (net_dev->addr_len != ETH_ALEN) | |
79 | return 0; | |
80 | ||
81 | /* no batman over batman */ | |
82 | #ifdef HAVE_NET_DEVICE_OPS | |
83 | if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) | |
84 | return 0; | |
85 | #else | |
86 | if (net_dev->hard_start_xmit == interface_tx) | |
87 | return 0; | |
88 | #endif | |
89 | ||
90 | /* Device is being bridged */ | |
91 | /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) | |
92 | return 0; */ | |
93 | ||
94 | return 1; | |
95 | } | |
96 | ||
/**
 * get_active_batman_if - find an active hard interface of a soft interface
 * @soft_iface: the batman soft interface the hard interface must belong to
 *
 * Walks hardif_list under RCU and returns the first IF_ACTIVE member
 * attached to @soft_iface, with a reference taken (atomic_inc_not_zero
 * skips entries already being freed). Returns NULL if none is active.
 * The caller must release the reference with hardif_free_ref().
 */
static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		if (batman_if->if_status == IF_ACTIVE &&
		    atomic_inc_not_zero(&batman_if->refcount))
			goto out;
	}

	batman_if = NULL;

out:
	rcu_read_unlock();
	return batman_if;
}
117 | ||
118 | static void update_primary_addr(struct bat_priv *bat_priv) | |
119 | { | |
120 | struct vis_packet *vis_packet; | |
121 | ||
122 | vis_packet = (struct vis_packet *) | |
123 | bat_priv->my_vis_info->skb_packet->data; | |
124 | memcpy(vis_packet->vis_orig, | |
125 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | |
126 | memcpy(vis_packet->sender_orig, | |
127 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | |
128 | } | |
129 | ||
/**
 * set_primary_if - install a new primary hard interface
 * @bat_priv: the mesh instance
 * @batman_if: the interface to promote (may be NULL to clear)
 *
 * Takes its own reference on @batman_if before publishing it, then drops
 * the reference held on the previous primary. If the new primary is set,
 * its pre-built OGM buffer is marked as first hop and the vis originator
 * address is refreshed.
 */
static void set_primary_if(struct bat_priv *bat_priv,
			   struct batman_if *batman_if)
{
	struct batman_packet *batman_packet;
	struct batman_if *old_if;

	/* treat a dying interface (refcount already zero) as NULL */
	if (batman_if && !atomic_inc_not_zero(&batman_if->refcount))
		batman_if = NULL;

	old_if = bat_priv->primary_if;
	bat_priv->primary_if = batman_if;

	if (old_if)
		hardif_free_ref(old_if);

	if (!bat_priv->primary_if)
		return;

	/* packet_buff was allocated in hardif_enable_interface() */
	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->flags = PRIMARIES_FIRST_HOP;
	batman_packet->ttl = TTL;

	update_primary_addr(bat_priv);

	/***
	 * hacky trick to make sure that we send the HNA information via
	 * our new primary interface
	 */
	atomic_set(&bat_priv->hna_local_changed, 1);
}
160 | ||
161 | static bool hardif_is_iface_up(struct batman_if *batman_if) | |
162 | { | |
163 | if (batman_if->net_dev->flags & IFF_UP) | |
164 | return true; | |
165 | ||
166 | return false; | |
167 | } | |
168 | ||
169 | static void update_mac_addresses(struct batman_if *batman_if) | |
170 | { | |
171 | memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, | |
172 | batman_if->net_dev->dev_addr, ETH_ALEN); | |
173 | memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender, | |
174 | batman_if->net_dev->dev_addr, ETH_ALEN); | |
175 | } | |
176 | ||
/**
 * check_known_mac_addr - warn if another active hard interface shares a MAC
 * @net_dev: the newly added/changed device to compare against the list
 *
 * Only warns; duplicate addresses are not rejected. Walks hardif_list
 * under RCU and skips inactive interfaces and @net_dev itself.
 */
static void check_known_mac_addr(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->net_dev == net_dev)
			continue;

		if (!compare_eth(batman_if->net_dev->dev_addr,
				 net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists "
			   "on: %s\n", net_dev->dev_addr,
			   batman_if->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses "
			   "unique to avoid problems!\n");
	}
	rcu_read_unlock();
}
202 | ||
203 | int hardif_min_mtu(struct net_device *soft_iface) | |
204 | { | |
205 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | |
206 | struct batman_if *batman_if; | |
207 | /* allow big frames if all devices are capable to do so | |
208 | * (have MTU > 1500 + BAT_HEADER_LEN) */ | |
209 | int min_mtu = ETH_DATA_LEN; | |
210 | ||
211 | if (atomic_read(&bat_priv->fragmentation)) | |
212 | goto out; | |
213 | ||
214 | rcu_read_lock(); | |
4389e47a | 215 | list_for_each_entry_rcu(batman_if, &hardif_list, list) { |
c6c8fea2 SE |
216 | if ((batman_if->if_status != IF_ACTIVE) && |
217 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) | |
218 | continue; | |
219 | ||
220 | if (batman_if->soft_iface != soft_iface) | |
221 | continue; | |
222 | ||
223 | min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN, | |
224 | min_mtu); | |
225 | } | |
226 | rcu_read_unlock(); | |
227 | out: | |
228 | return min_mtu; | |
229 | } | |
230 | ||
231 | /* adjusts the MTU if a new interface with a smaller MTU appeared. */ | |
232 | void update_min_mtu(struct net_device *soft_iface) | |
233 | { | |
234 | int min_mtu; | |
235 | ||
236 | min_mtu = hardif_min_mtu(soft_iface); | |
237 | if (soft_iface->mtu != min_mtu) | |
238 | soft_iface->mtu = min_mtu; | |
239 | } | |
240 | ||
241 | static void hardif_activate_interface(struct batman_if *batman_if) | |
242 | { | |
243 | struct bat_priv *bat_priv; | |
244 | ||
245 | if (batman_if->if_status != IF_INACTIVE) | |
246 | return; | |
247 | ||
248 | bat_priv = netdev_priv(batman_if->soft_iface); | |
249 | ||
250 | update_mac_addresses(batman_if); | |
251 | batman_if->if_status = IF_TO_BE_ACTIVATED; | |
252 | ||
253 | /** | |
254 | * the first active interface becomes our primary interface or | |
255 | * the next active interface after the old primay interface was removed | |
256 | */ | |
257 | if (!bat_priv->primary_if) | |
258 | set_primary_if(bat_priv, batman_if); | |
259 | ||
260 | bat_info(batman_if->soft_iface, "Interface activated: %s\n", | |
261 | batman_if->net_dev->name); | |
262 | ||
263 | update_min_mtu(batman_if->soft_iface); | |
264 | return; | |
265 | } | |
266 | ||
267 | static void hardif_deactivate_interface(struct batman_if *batman_if) | |
268 | { | |
269 | if ((batman_if->if_status != IF_ACTIVE) && | |
a4c135c5 | 270 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) |
c6c8fea2 SE |
271 | return; |
272 | ||
273 | batman_if->if_status = IF_INACTIVE; | |
274 | ||
275 | bat_info(batman_if->soft_iface, "Interface deactivated: %s\n", | |
276 | batman_if->net_dev->name); | |
277 | ||
278 | update_min_mtu(batman_if->soft_iface); | |
279 | } | |
280 | ||
/**
 * hardif_enable_interface - attach a hard interface to a soft interface
 * @batman_if: the hard interface to enable (must be IF_NOT_IN_USE)
 * @iface_name: name of the soft interface to attach to (created on demand)
 *
 * Allocates and pre-fills the interface's OGM buffer, registers it in the
 * originator hash, installs the ETH_P_BATMAN packet handler and starts
 * OGM scheduling. Returns 0 on success or if the interface was already in
 * use, -ENOMEM on allocation/creation failure.
 */
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_NOT_IN_USE)
		goto out;

	/* hold a reference for the duration of the attachment; bail if the
	 * interface is already being freed */
	if (!atomic_inc_not_zero(&batman_if->refcount))
		goto out;

	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!batman_if->soft_iface) {
		batman_if->soft_iface = softif_create(iface_name);

		if (!batman_if->soft_iface)
			goto err;

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(batman_if->soft_iface);
	}

	bat_priv = netdev_priv(batman_if->soft_iface);
	batman_if->packet_len = BAT_PACKET_LEN;
	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);

	if (!batman_if->packet_buff) {
		bat_err(batman_if->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", batman_if->net_dev->name);
		goto err;
	}

	/* pre-build the OGM template sent out on this interface */
	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->packet_type = BAT_PACKET;
	batman_packet->version = COMPAT_VERSION;
	batman_packet->flags = 0;
	batman_packet->ttl = 2;
	batman_packet->tq = TQ_MAX_VALUE;
	batman_packet->num_hna = 0;

	batman_if->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	batman_if->if_status = IF_INACTIVE;
	orig_hash_add_if(batman_if, bat_priv->num_ifaces);

	/* receive batman ethertype frames from this device only */
	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	batman_if->batman_adv_ptype.func = batman_skb_recv;
	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
	dev_add_pack(&batman_if->batman_adv_ptype);

	atomic_set(&batman_if->seqno, 1);
	atomic_set(&batman_if->frag_seqno, 1);
	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
		 batman_if->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			"The MTU of interface %s is too small (%i) to handle "
			"the transport of batman-adv packets. Packets going "
			"over this interface will be fragmented on layer2 "
			"which could impact the performance. Setting the MTU "
			"to %zi would solve the problem.\n",
			batman_if->net_dev->name, batman_if->net_dev->mtu,
			ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			"The MTU of interface %s is too small (%i) to handle "
			"the transport of batman-adv packets. If you experience"
			" problems getting traffic through try increasing the "
			"MTU to %zi.\n",
			batman_if->net_dev->name, batman_if->net_dev->mtu,
			ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(batman_if))
		hardif_activate_interface(batman_if);
	else
		bat_err(batman_if->soft_iface, "Not using interface %s "
			"(retrying later): interface not active\n",
			batman_if->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_own_packet(batman_if);

out:
	return 0;

err:
	hardif_free_ref(batman_if);
	return -ENOMEM;
}
375 | ||
/**
 * hardif_disable_interface - detach a hard interface from its soft interface
 * @batman_if: the hard interface to disable
 *
 * Reverses hardif_enable_interface(): deactivates the interface, removes
 * the packet handler, updates the originator hash, hands the primary role
 * to another active interface if needed, frees the OGM buffer and purges
 * all routing state referencing this interface. Destroys the soft
 * interface when this was its last attached hard interface.
 */
void hardif_disable_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	if (batman_if->if_status == IF_ACTIVE)
		hardif_deactivate_interface(batman_if);

	/* deactivation may not have succeeded (e.g. already removed) */
	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
		 batman_if->net_dev->name);
	dev_remove_pack(&batman_if->batman_adv_ptype);

	bat_priv->num_ifaces--;
	orig_hash_del_if(batman_if, bat_priv->num_ifaces);

	if (batman_if == bat_priv->primary_if) {
		struct batman_if *new_if;

		new_if = get_active_batman_if(batman_if->soft_iface);
		set_primary_if(bat_priv, new_if);

		/* drop the lookup reference; set_primary_if took its own */
		if (new_if)
			hardif_free_ref(new_if);
	}

	kfree(batman_if->packet_buff);
	batman_if->packet_buff = NULL;
	batman_if->if_status = IF_NOT_IN_USE;

	/* delete all references to this batman_if */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, batman_if);
	dev_put(batman_if->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(batman_if->soft_iface);

	batman_if->soft_iface = NULL;
	hardif_free_ref(batman_if);
}
419 | ||
/**
 * hardif_add_interface - wrap a net_device in a batman_if and register it
 * @net_dev: the device to wrap (validated via is_valid_iface())
 *
 * Holds a reference on @net_dev, creates the sysfs entry, initializes the
 * batman_if as IF_NOT_IN_USE and adds it to hardif_list under
 * hardif_list_lock. Returns the new batman_if (with one extra reference
 * for the caller) or NULL on failure.
 */
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
	struct batman_if *batman_if;
	int ret;

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
	if (!batman_if) {
		pr_err("Can't add interface (%s): out of memory\n",
		       net_dev->name);
		goto release_dev;
	}

	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	batman_if->if_num = -1;
	batman_if->net_dev = net_dev;
	batman_if->soft_iface = NULL;
	batman_if->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&batman_if->list);
	/* extra reference for return */
	atomic_set(&batman_if->refcount, 2);

	check_known_mac_addr(batman_if->net_dev);

	spin_lock(&hardif_list_lock);
	list_add_tail_rcu(&batman_if->list, &hardif_list);
	spin_unlock(&hardif_list_lock);

	return batman_if;

free_if:
	kfree(batman_if);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}
465 | ||
/**
 * hardif_remove_interface - tear down and free a hard interface
 * @batman_if: the interface to remove (must already be off hardif_list)
 *
 * Disables the interface first if it is still in use, removes its sysfs
 * entry and drops the list's reference. If disabling fails to bring it
 * back to IF_NOT_IN_USE, removal is aborted.
 */
static void hardif_remove_interface(struct batman_if *batman_if)
{
	/* first deactivate interface */
	if (batman_if->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(batman_if);

	if (batman_if->if_status != IF_NOT_IN_USE)
		return;

	batman_if->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&batman_if->hardif_obj);
	hardif_free_ref(batman_if);
}
479 | ||
/**
 * hardif_remove_interfaces - remove all registered hard interfaces
 *
 * Moves every entry off hardif_list onto a private queue while holding
 * hardif_list_lock (so the spinlock is not held across the potentially
 * sleeping teardown), then removes each interface under rtnl_lock.
 * Used on module shutdown.
 */
void hardif_remove_interfaces(void)
{
	struct batman_if *batman_if, *batman_if_tmp;
	struct list_head if_queue;

	INIT_LIST_HEAD(&if_queue);

	spin_lock(&hardif_list_lock);
	list_for_each_entry_safe(batman_if, batman_if_tmp, &hardif_list, list) {
		list_del_rcu(&batman_if->list);
		list_add_tail(&batman_if->list, &if_queue);
	}
	spin_unlock(&hardif_list_lock);

	rtnl_lock();
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
		hardif_remove_interface(batman_if);
	}
	rtnl_unlock();
}
500 | ||
501 | static int hard_if_event(struct notifier_block *this, | |
502 | unsigned long event, void *ptr) | |
503 | { | |
504 | struct net_device *net_dev = (struct net_device *)ptr; | |
505 | struct batman_if *batman_if = get_batman_if_by_netdev(net_dev); | |
506 | struct bat_priv *bat_priv; | |
507 | ||
508 | if (!batman_if && event == NETDEV_REGISTER) | |
509 | batman_if = hardif_add_interface(net_dev); | |
510 | ||
511 | if (!batman_if) | |
512 | goto out; | |
513 | ||
514 | switch (event) { | |
515 | case NETDEV_UP: | |
516 | hardif_activate_interface(batman_if); | |
517 | break; | |
518 | case NETDEV_GOING_DOWN: | |
519 | case NETDEV_DOWN: | |
520 | hardif_deactivate_interface(batman_if); | |
521 | break; | |
522 | case NETDEV_UNREGISTER: | |
4389e47a | 523 | spin_lock(&hardif_list_lock); |
c6c8fea2 | 524 | list_del_rcu(&batman_if->list); |
4389e47a | 525 | spin_unlock(&hardif_list_lock); |
c6c8fea2 SE |
526 | |
527 | hardif_remove_interface(batman_if); | |
528 | break; | |
529 | case NETDEV_CHANGEMTU: | |
530 | if (batman_if->soft_iface) | |
531 | update_min_mtu(batman_if->soft_iface); | |
532 | break; | |
533 | case NETDEV_CHANGEADDR: | |
534 | if (batman_if->if_status == IF_NOT_IN_USE) | |
535 | goto hardif_put; | |
536 | ||
537 | check_known_mac_addr(batman_if->net_dev); | |
538 | update_mac_addresses(batman_if); | |
539 | ||
540 | bat_priv = netdev_priv(batman_if->soft_iface); | |
541 | if (batman_if == bat_priv->primary_if) | |
542 | update_primary_addr(bat_priv); | |
543 | break; | |
544 | default: | |
545 | break; | |
546 | }; | |
547 | ||
548 | hardif_put: | |
ed75ccbe | 549 | hardif_free_ref(batman_if); |
c6c8fea2 SE |
550 | out: |
551 | return NOTIFY_DONE; | |
552 | } | |
553 | ||
/* receive a packet with the batman ethertype coming on a hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret;

	/* ptype is embedded in the batman_if it was registered for */
	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
				|| !skb_mac_header(skb)))
		goto err_free;

	/* not attached to a soft interface (yet) */
	if (!batman_if->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(batman_if->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (batman_if->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, batman_if);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, batman_if);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, batman_if);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, batman_if);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, batman_if);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
653 | ||
/* netdevice event notifier; dispatches to hard_if_event() above */
struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};