]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - net/batman-adv/main.c
Merge branch 'for-linville' of git://github.com/kvalo/ath6kl
[mirror_ubuntu-focal-kernel.git] / net / batman-adv / main.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20 #include <linux/crc32c.h>
21 #include <linux/highmem.h>
22 #include "main.h"
23 #include "sysfs.h"
24 #include "debugfs.h"
25 #include "routing.h"
26 #include "send.h"
27 #include "originator.h"
28 #include "soft-interface.h"
29 #include "icmp_socket.h"
30 #include "translation-table.h"
31 #include "hard-interface.h"
32 #include "gateway_client.h"
33 #include "bridge_loop_avoidance.h"
34 #include "distributed-arp-table.h"
35 #include "vis.h"
36 #include "hash.h"
37 #include "bat_algo.h"
38
39
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
/* dispatch table indexed by the packet-type byte; slots without a real
 * handler point at batadv_recv_unhandled_packet (see
 * batadv_recv_handler_init())
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
/* value of the "routing_algo" module parameter; defaults to B.A.T.M.A.N. IV */
char batadv_routing_algo[20] = "BATMAN_IV";
/* list of all registered routing algorithms (struct batadv_algo_ops) */
static struct hlist_head batadv_algo_list;

/* Ethernet broadcast address used as destination for broadcast frames */
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue shared by all batman-adv deferred work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
54
55 static int __init batadv_init(void)
56 {
57 INIT_LIST_HEAD(&batadv_hardif_list);
58 INIT_HLIST_HEAD(&batadv_algo_list);
59
60 batadv_recv_handler_init();
61
62 batadv_iv_init();
63
64 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
65
66 if (!batadv_event_workqueue)
67 return -ENOMEM;
68
69 batadv_socket_init();
70 batadv_debugfs_init();
71
72 register_netdevice_notifier(&batadv_hard_if_notifier);
73
74 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
75 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
76
77 return 0;
78 }
79
/**
 * batadv_exit - module exit: tear down global state
 *
 * Order matters: debugfs entries go first, then the notifier is removed so
 * no new hard interfaces can appear while the existing ones are detached,
 * then the workqueue is drained and destroyed, and finally rcu_barrier()
 * waits for all pending RCU callbacks (object frees) to finish before the
 * module text vanishes.
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}
92
/**
 * batadv_mesh_init - initialize the per-mesh state of a soft interface
 * @soft_iface: netdev whose batadv_priv private area is to be set up
 *
 * Initializes all spinlocks and lists in bat_priv, then brings up the
 * subsystems (originators, translation table, vis, bridge loop avoidance,
 * distributed ARP table) in order. On any subsystem failure the partially
 * initialized state is torn down via batadv_mesh_free().
 *
 * Returns 0 on success or the failing subsystem's negative error code.
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the translation table;
	 * NOTE(review): the return value is deliberately not checked here -
	 * presumably a failed local add is tolerable at init time, confirm
	 * against batadv_tt_local_add()
	 */
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	/* mark the mesh usable only after every subsystem is up */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}
147
/**
 * batadv_mesh_free - tear down the per-mesh state of a soft interface
 * @soft_iface: netdev whose batadv_priv private area is to be released
 *
 * Safe to call on a partially initialized mesh (it is the error path of
 * batadv_mesh_init()). The mesh is first marked DEACTIVATING so concurrent
 * users back off, then the subsystems are freed in reverse dependency
 * order, and finally the state becomes INACTIVE.
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	/* cancel all queued/aggregated packets still waiting to be sent */
	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	batadv_dat_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
171
172 int batadv_is_my_mac(const uint8_t *addr)
173 {
174 const struct batadv_hard_iface *hard_iface;
175
176 rcu_read_lock();
177 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
178 if (hard_iface->if_status != BATADV_IF_ACTIVE)
179 continue;
180
181 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
182 rcu_read_unlock();
183 return 1;
184 }
185 }
186 rcu_read_unlock();
187 return 0;
188 }
189
190 /**
191 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
192 * function that requires the primary interface
193 * @seq: debugfs table seq_file struct
194 *
195 * Returns primary interface if found or NULL otherwise.
196 */
197 struct batadv_hard_iface *
198 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
199 {
200 struct net_device *net_dev = (struct net_device *)seq->private;
201 struct batadv_priv *bat_priv = netdev_priv(net_dev);
202 struct batadv_hard_iface *primary_if;
203
204 primary_if = batadv_primary_if_get_selected(bat_priv);
205
206 if (!primary_if) {
207 seq_printf(seq,
208 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
209 net_dev->name);
210 goto out;
211 }
212
213 if (primary_if->if_status == BATADV_IF_ACTIVE)
214 goto out;
215
216 seq_printf(seq,
217 "BATMAN mesh %s disabled - primary interface not active\n",
218 net_dev->name);
219 batadv_hardif_free_ref(primary_if);
220 primary_if = NULL;
221
222 out:
223 return primary_if;
224 }
225
/* default rx handler for packet types nobody registered for: the caller
 * (batadv_batman_skb_recv) frees the skb when it sees NET_RX_DROP
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
231
232 /* incoming packets with the batman ethertype received on any active hard
233 * interface
234 */
/* incoming packets with the batman ethertype received on any active hard
 * interface
 *
 * Validates the frame (shared-skb check, minimal length, sane mac header,
 * active mesh and interface, compatible protocol version) and then
 * dispatches it by packet type through batadv_rx_handler[]. Always returns
 * NET_RX_SUCCESS for frames that reached dispatch, NET_RX_DROP only for
 * frames rejected before it.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* ptype is embedded in the hard_iface, recover the container */
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	/* interface may be in the process of being removed */
	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	/* the common header is the same for all packet types, so the OGM
	 * layout is enough to read type and version
	 */
	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
302
/* fill the rx dispatch table: every slot first points at the drop stub so
 * unknown packet types are rejected safely, then the known types are wired
 * to their handlers
 */
static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* fragmented unicast packet */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
	/* vis packet */
	batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
	/* Translation table query (request or response) */
	batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
	/* Roaming advertisement */
	batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}
327
328 int
329 batadv_recv_handler_register(uint8_t packet_type,
330 int (*recv_handler)(struct sk_buff *,
331 struct batadv_hard_iface *))
332 {
333 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
334 return -EBUSY;
335
336 batadv_rx_handler[packet_type] = recv_handler;
337 return 0;
338 }
339
/* release a packet-type slot by restoring the drop stub */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
344
345 static struct batadv_algo_ops *batadv_algo_get(char *name)
346 {
347 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
348 struct hlist_node *node;
349
350 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
351 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
352 continue;
353
354 bat_algo_ops = bat_algo_ops_tmp;
355 break;
356 }
357
358 return bat_algo_ops;
359 }
360
361 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
362 {
363 struct batadv_algo_ops *bat_algo_ops_tmp;
364 int ret;
365
366 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
367 if (bat_algo_ops_tmp) {
368 pr_info("Trying to register already registered routing algorithm: %s\n",
369 bat_algo_ops->name);
370 ret = -EEXIST;
371 goto out;
372 }
373
374 /* all algorithms must implement all ops (for now) */
375 if (!bat_algo_ops->bat_iface_enable ||
376 !bat_algo_ops->bat_iface_disable ||
377 !bat_algo_ops->bat_iface_update_mac ||
378 !bat_algo_ops->bat_primary_iface_set ||
379 !bat_algo_ops->bat_ogm_schedule ||
380 !bat_algo_ops->bat_ogm_emit) {
381 pr_info("Routing algo '%s' does not implement required ops\n",
382 bat_algo_ops->name);
383 ret = -EINVAL;
384 goto out;
385 }
386
387 INIT_HLIST_NODE(&bat_algo_ops->list);
388 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
389 ret = 0;
390
391 out:
392 return ret;
393 }
394
395 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
396 {
397 struct batadv_algo_ops *bat_algo_ops;
398 int ret = -EINVAL;
399
400 bat_algo_ops = batadv_algo_get(name);
401 if (!bat_algo_ops)
402 goto out;
403
404 bat_priv->bat_algo_ops = bat_algo_ops;
405 ret = 0;
406
407 out:
408 return ret;
409 }
410
411 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
412 {
413 struct batadv_algo_ops *bat_algo_ops;
414 struct hlist_node *node;
415
416 seq_printf(seq, "Available routing algorithms:\n");
417
418 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
419 seq_printf(seq, "%s\n", bat_algo_ops->name);
420 }
421
422 return 0;
423 }
424
425 /**
426 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
427 * the header
428 * @skb: skb pointing to fragmented socket buffers
429 * @payload_ptr: Pointer to position inside the head buffer of the skb
430 * marking the start of the data to be CRC'ed
431 *
432 * payload_ptr must always point to an address in the skb head buffer and not to
433 * a fragment.
434 */
/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Returns the CRC32C of the data from @payload_ptr to the end of the skb,
 * in network byte order.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	/* byte offset of the payload within the head buffer */
	from = (unsigned int)(payload_ptr - skb->data);

	/* walk head, paged fragments and frag_list in sequence so the CRC
	 * covers non-linear skbs as well
	 */
	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}
	skb_abort_seq_read(&st);

	return htonl(crc);
}
456
457 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
458 {
459 struct batadv_algo_ops *bat_algo_ops;
460 char *algo_name = (char *)val;
461 size_t name_len = strlen(algo_name);
462
463 if (algo_name[name_len - 1] == '\n')
464 algo_name[name_len - 1] = '\0';
465
466 bat_algo_ops = batadv_algo_get(algo_name);
467 if (!bat_algo_ops) {
468 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
469 return -EINVAL;
470 }
471
472 return param_set_copystring(algo_name, kp);
473 }
474
/* custom ops so writes to the parameter are validated by
 * batadv_param_set_ra() before being stored
 */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* backing storage descriptor for the routing_algo string parameter */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);