/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires that we take the
 * read lock or we risk operating on an old table. The write lock is only
 * needed when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *      for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

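/*
 * mesh_table_alloc - allocate and initialize a mesh path hash table
 *
 * Returns a table with 2^size_order zeroed buckets, one write lock per
 * bucket and a fresh random hash seed, or NULL on allocation failure.
 * Callers (mesh_pathtbl_init() and the grow functions below) fill in
 * the free_node/copy_node callbacks and the gate list themselves.
 */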
static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

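/*
 * __mesh_table_free() releases the bucket array, the lock array and the
 * table itself; mesh_table_free() first empties every bucket.  Pass
 * free_leafs == true to also free the mesh_path structures and the gate
 * list, which is only safe when no other table shares them (i.e. on
 * teardown rather than after a resize).
 */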
static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        /* known_gates may still be NULL if mesh_pathtbl_init() failed
         * half-way, so check before walking the list */
        if (free_leafs && tbl->known_gates) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

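/*
 * mesh_table_grow - rehash all entries of @oldtbl into @newtbl
 *
 * Returns -EAGAIN if the old table has not yet outgrown its mean chain
 * length and does not need to grow, -ENOMEM if copying a node failed,
 * and 0 on success.  The caller still owns both tables afterwards and
 * is responsible for publishing @newtbl and disposing of @oldtbl.
 */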
static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

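/*
 * prepare_for_gate - rewrite a queued frame so it can travel via a gate
 *
 * Adds the 802.11s Address Extension (eaddr1/eaddr2 preserve the
 * original mesh DA/SA from addr3/addr4) when the frame does not carry
 * one yet, then points addr1 at the gate path's next hop and addr3 at
 * the gate itself (@dst_addr).
 */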
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *cp_skb = NULL;
        struct sk_buff_head gateq, failq;
        unsigned long flags;
        int num_skbs;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        num_skbs = skb_queue_len(&failq);

        while (num_skbs--) {
                skb = __skb_dequeue(&failq);
                if (copy) {
                        cp_skb = skb_copy(skb, GFP_ATOMIC);
                        if (cp_skb)
                                __skb_queue_tail(&failq, cp_skb);
                }

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                __skb_queue_tail(&gateq, skb);
        }

        spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
        skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg("Mpath queue for gate %pM has %d frames\n",
                        gate_mpath->dst,
                        skb_queue_len(&gate_mpath->frame_queue));
        spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

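/*
 * path_lookup - common lookup helper for the mesh and MPP tables
 *
 * Walks the RCU-protected hash bucket for @dst and returns the matching
 * path for @sdata, clearing MESH_PATH_ACTIVE on the fly if the entry
 * has expired.  Callers must hold the RCU read lock.
 */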
static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
                                     struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

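/* RCU callback: free a gate list node once all readers are done with it */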
static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        kfree(node);
}

/**
 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
 * @tbl: table which contains known_gates list
 * @mpath: mpath to known mesh gate
 *
 * Returns: 0 on success, -EEXIST if the gate is already known, or
 * -ENOMEM on allocation failure.
 */
static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
                  mpath->sdata->name, mpath->dst,
                  mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        call_rcu(&gate->rcu, mesh_gate_node_reclaim);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg("Mesh path (%s): Deleted gate: %pM. %d known gates\n",
                                  mpath->sdata->name, mpath->dst,
                                  mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

/**
 * mesh_path_add_gate - record the given gate mpath in the mesh path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        return mesh_gate_add(mesh_paths, mpath);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

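/*
 * mesh_mpath_table_grow - double the size of the mesh path table
 *
 * Runs from the MESH_WORK_GROW_MPATH_TABLE work item queued by
 * mesh_path_add().  The new table is published with
 * rcu_assign_pointer() and the old one is freed after a grace period;
 * mesh_mpp_table_grow() below does the same for the MPP table.
 */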
void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

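/*
 * mpp_path_add - add a proxy path (destination @dst proxied by @mpp)
 *
 * Analogous to mesh_path_add(), but for the MPP table, which maps
 * destinations outside the mesh to the proxy node that reaches them.
 * Returns 0 on success, -ENOTSUPP for our own or multicast addresses,
 * -ENOMEM on allocation failure and -EEXIST if the path is already
 * present.
 */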
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                        mpath->dst, cpu_to_le32(mpath->sn),
                                        reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

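/*
 * RCU callback to reclaim a mesh path once no reader can still see it:
 * stops the path timer, drops the per-interface path count and frees
 * both the mesh_path and its table node.
 */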
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;
        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock_bh(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock_bh(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

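/* Delete every path in @tbl that belongs to @sdata; the caller holds
 * the RCU read lock and the resize read lock. */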
static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames are
 * copied from each gate to the next.  After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg("Not forwarding %p\n", gate->mpath);
                        mpath_dbg("flags %x\n", gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg("Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

        if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr1;
                rcu_read_lock();
                mpath = mesh_path_lookup(da, sdata);
                if (mpath) {
                        spin_lock_bh(&mpath->state_lock);
                        sn = ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                }
                rcu_read_unlock();
                mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
                                   cpu_to_le32(sn), reason, ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called holding mpath->state_lock;
 * the lock is taken internally.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

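/*
 * mesh_path_node_free - free_node callback used by mesh_table_free()
 *
 * Unlinks the node from its bucket and frees it; when @free_leafs is
 * true the mesh_path itself (and its timer) is torn down as well.
 */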
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

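/*
 * mesh_path_node_copy - copy_node callback used by mesh_table_grow()
 *
 * Allocates a new table node pointing at the same mesh_path and hashes
 * it into @newtbl; returns -ENOMEM if the allocation fails.
 */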
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}

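/*
 * mesh_pathtbl_init - allocate the initial mesh path and MPP tables
 *
 * Called once at subsystem init; returns -ENOMEM and frees whatever was
 * already allocated if any allocation fails.
 */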
int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                mesh_table_free(tbl_path, true);
                return -ENOMEM;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                mesh_table_free(tbl_path, true);
                return -ENOMEM;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                mesh_table_free(tbl_mpp, true);
                mesh_table_free(tbl_path, true);
                return -ENOMEM;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;
}

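/*
 * mesh_path_expire - delete stale paths on @sdata
 *
 * Removes every path belonging to @sdata that is neither resolving nor
 * fixed and whose expiry time passed more than MESH_PATH_EXPIRE ago.
 */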
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_raw(mesh_paths), true);
        mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}