]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blobdiff - net/mac80211/mesh_pathtbl.c
mac80211: Start implementing QoS support for mesh interfaces
[mirror_ubuntu-artful-kernel.git] / net / mac80211 / mesh_pathtbl.c
index 068ee65182547e0f825a404768a2c72b8c8b7a7d..4fc8c7a5d4dd010f934f5ddecf01f2d07224f9fe 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <net/mac80211.h>
+#include "wme.h"
 #include "ieee80211_i.h"
 #include "mesh.h"
 
+#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
+#define mpath_dbg(fmt, args...)        printk(KERN_DEBUG fmt, ##args)
+#else
+#define mpath_dbg(fmt, args...)        do { (void)(0); } while (0)
+#endif
+
 /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
 #define INIT_PATHS_SIZE_ORDER  2
 
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
 int mesh_paths_generation;
 
 /* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups).  Adding or removing nodes requires we take
+ * the read lock or we risk operating on an old table.  The write lock is only
+ * needed when modifying the number of buckets in a table.
  */
 static DEFINE_RWLOCK(pathtbl_resize_lock);
 
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
                lockdep_is_held(&pathtbl_resize_lock));
 }
 
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
+       spin_lock_init(&newtbl->gates_lock);
 
        return newtbl;
 }
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
+       struct mpath_node *gate;
        int i;
 
        mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
+       if (free_leafs) {
+               spin_lock_bh(&tbl->gates_lock);
+               hlist_for_each_entry_safe(gate, p, q,
+                                        tbl->known_gates, list) {
+                       hlist_del(&gate->list);
+                       kfree(gate);
+               }
+               kfree(tbl->known_gates);
+               spin_unlock_bh(&tbl->gates_lock);
+       }
+
        __mesh_table_free(tbl);
 }
 
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
+       newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
        oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;
+       struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
        rcu_assign_pointer(mpath->next_hop, sta);
 
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+               skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+               ieee80211_set_qos_hdr(sdata->local, skb);
                __skb_queue_tail(&tmpq, skb);
        }
 
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+                            struct mesh_path *gate_mpath)
+{
+       struct ieee80211_hdr *hdr;
+       struct ieee80211s_hdr *mshdr;
+       int mesh_hdrlen, hdrlen;
+       char *next_hop;
+
+       hdr = (struct ieee80211_hdr *) skb->data;
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+       if (!(mshdr->flags & MESH_FLAGS_AE)) {
+               /* size of the fixed part of the mesh header */
+               mesh_hdrlen = 6;
+
+               /* make room for the two extended addresses */
+               skb_push(skb, 2 * ETH_ALEN);
+               memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+               hdr = (struct ieee80211_hdr *) skb->data;
+
+               /* we preserve the previous mesh header and only add
+        * the new addresses */
+               mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+               mshdr->flags = MESH_FLAGS_AE_A5_A6;
+               memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+               memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+       }
+
+       /* update next hop */
+       hdr = (struct ieee80211_hdr *) skb->data;
+       rcu_read_lock();
+       next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+       memcpy(hdr->addr1, next_hop, ETH_ALEN);
+       rcu_read_unlock();
+       memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
 
 /**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
  *
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
  *
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath.  The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue.  When false,
+ * move them.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+                                   struct mesh_path *from_mpath,
+                                   bool copy)
 {
-       struct mesh_path *mpath;
-       struct hlist_node *n;
-       struct hlist_head *bucket;
-       struct mesh_table *tbl;
-       struct mpath_node *node;
+       struct sk_buff *skb, *cp_skb = NULL;
+       struct sk_buff_head gateq, failq;
+       unsigned long flags;
+       int num_skbs;
 
-       tbl = rcu_dereference(mesh_paths);
+       BUG_ON(gate_mpath == from_mpath);
+       BUG_ON(!gate_mpath->next_hop);
 
-       bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-       hlist_for_each_entry_rcu(node, n, bucket, list) {
-               mpath = node->mpath;
-               if (mpath->sdata == sdata &&
-                               memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
-                       if (MPATH_EXPIRED(mpath)) {
-                               spin_lock_bh(&mpath->state_lock);
-                               if (MPATH_EXPIRED(mpath))
-                                       mpath->flags &= ~MESH_PATH_ACTIVE;
-                               spin_unlock_bh(&mpath->state_lock);
-                       }
-                       return mpath;
+       __skb_queue_head_init(&gateq);
+       __skb_queue_head_init(&failq);
+
+       spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+       skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+       spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+       num_skbs = skb_queue_len(&failq);
+
+       while (num_skbs--) {
+               skb = __skb_dequeue(&failq);
+               if (copy) {
+                       cp_skb = skb_copy(skb, GFP_ATOMIC);
+                       if (cp_skb)
+                               __skb_queue_tail(&failq, cp_skb);
                }
+
+               prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+               __skb_queue_tail(&gateq, skb);
        }
-       return NULL;
+
+       spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+       skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+       mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+                       gate_mpath->dst,
+                       skb_queue_len(&gate_mpath->frame_queue));
+       spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+       if (!copy)
+               return;
+
+       spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+       skb_queue_splice(&failq, &from_mpath->frame_queue);
+       spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 }
 
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+                                         struct ieee80211_sub_if_data *sdata)
 {
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
-       struct mesh_table *tbl;
        struct mpath_node *node;
 
-       tbl = rcu_dereference(mpp_paths);
-
        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
-                   memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+                               memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
-                               if (MPATH_EXPIRED(mpath))
-                                       mpath->flags &= ~MESH_PATH_ACTIVE;
+                               mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
        return NULL;
 }
 
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @dst: hardware address (ETH_ALEN length) of destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+       return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+       return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
 
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
-                               if (MPATH_EXPIRED(node->mpath))
-                                       node->mpath->flags &= ~MESH_PATH_ACTIVE;
+                               node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
        return NULL;
 }
 
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+       struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+       kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @tbl: table which contains known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+       struct mpath_node *gate, *new_gate;
+       struct hlist_node *n;
+       int err;
+
+       rcu_read_lock();
+       tbl = rcu_dereference(tbl);
+
+       hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+               if (gate->mpath == mpath) {
+                       err = -EEXIST;
+                       goto err_rcu;
+               }
+
+       new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+       if (!new_gate) {
+               err = -ENOMEM;
+               goto err_rcu;
+       }
+
+       mpath->is_gate = true;
+       mpath->sdata->u.mesh.num_gates++;
+       new_gate->mpath = mpath;
+       spin_lock_bh(&tbl->gates_lock);
+       hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+       spin_unlock_bh(&tbl->gates_lock);
+       rcu_read_unlock();
+       mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+                 mpath->sdata->name, mpath->dst,
+                 mpath->sdata->u.mesh.num_gates);
+       return 0;
+err_rcu:
+       rcu_read_unlock();
+       return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+       struct mpath_node *gate;
+       struct hlist_node *p, *q;
+
+       tbl = rcu_dereference(tbl);
+
+       hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+               if (gate->mpath == mpath) {
+                       spin_lock_bh(&tbl->gates_lock);
+                       hlist_del_rcu(&gate->list);
+                       call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+                       spin_unlock_bh(&tbl->gates_lock);
+                       mpath->sdata->u.mesh.num_gates--;
+                       mpath->is_gate = false;
+                       mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+                                 "%d known gates\n", mpath->sdata->name,
+                                 mpath->dst, mpath->sdata->u.mesh.num_gates);
+                       break;
+               }
+
+       return 0;
+}
+
+/**
+ * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+       return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+       return sdata->u.mesh.num_gates;
+}
+
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
+       init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
 
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
+       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
-               spin_lock_bh(&mpath->state_lock);
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
+                       spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                        mpath->dst, cpu_to_le32(mpath->sn),
-                                       cpu_to_le16(PERR_RCODE_DEST_UNREACH),
-                                       bcast, sdata);
-               } else
-               spin_unlock_bh(&mpath->state_lock);
+                                       reason, bcast, sdata);
+               }
        }
        rcu_read_unlock();
 }
 
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+       struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+       struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+       del_timer_sync(&node->mpath->timer);
+       atomic_dec(&sdata->u.mesh.mpaths);
+       kfree(node->mpath);
+       kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+       struct mesh_path *mpath;
+       mpath = node->mpath;
+       spin_lock(&mpath->state_lock);
+       mpath->flags |= MESH_PATH_RESOLVING;
+       if (mpath->is_gate)
+               mesh_gate_del(tbl, mpath);
+       hlist_del_rcu(&node->list);
+       call_rcu(&node->rcu, mesh_path_node_reclaim);
+       spin_unlock(&mpath->state_lock);
+       atomic_dec(&tbl->entries);
+}
+
 /**
  * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
  *
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        int i;
 
        rcu_read_lock();
-       tbl = rcu_dereference(mesh_paths);
+       read_lock_bh(&pathtbl_resize_lock);
+       tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
-               if (rcu_dereference(mpath->next_hop) == sta)
-                       mesh_path_del(mpath->dst, mpath->sdata);
+               if (rcu_dereference(mpath->next_hop) == sta) {
+                       spin_lock_bh(&tbl->hashwlock[i]);
+                       __mesh_path_del(tbl, node);
+                       spin_unlock_bh(&tbl->hashwlock[i]);
+               }
        }
+       read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
 }
 
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+                                struct ieee80211_sub_if_data *sdata)
 {
-       struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
 
-       rcu_read_lock();
-       tbl = rcu_dereference(mesh_paths);
+       WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
-               if (mpath->sdata == sdata)
-                       mesh_path_del(mpath->dst, mpath->sdata);
+               if (mpath->sdata != sdata)
+                       continue;
+               spin_lock_bh(&tbl->hashwlock[i]);
+               __mesh_path_del(tbl, node);
+               spin_unlock_bh(&tbl->hashwlock[i]);
        }
-       rcu_read_unlock();
 }
 
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths as well as mesh portal paths.
+ *
+ * @sdata - interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-       struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-       struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+       struct mesh_table *tbl;
 
-       del_timer_sync(&node->mpath->timer);
-       atomic_dec(&sdata->u.mesh.mpaths);
-       kfree(node->mpath);
-       kfree(node);
+       rcu_read_lock();
+       read_lock_bh(&pathtbl_resize_lock);
+       tbl = resize_dereference_mesh_paths();
+       table_flush_by_iface(tbl, sdata);
+       tbl = resize_dereference_mpp_paths();
+       table_flush_by_iface(tbl, sdata);
+       read_unlock_bh(&pathtbl_resize_lock);
+       rcu_read_unlock();
 }
 
 /**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
-                       spin_lock(&mpath->state_lock);
-                       mpath->flags |= MESH_PATH_RESOLVING;
-                       hlist_del_rcu(&node->list);
-                       call_rcu(&node->rcu, mesh_path_node_reclaim);
-                       atomic_dec(&tbl->entries);
-                       spin_unlock(&mpath->state_lock);
+                       __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }
@@ -680,6 +933,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
                                &mpath->frame_queue);
 }
 
+/**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue.  If there is more than one gate, the frames
+ * are copied from each gate to the next.  After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+       struct ieee80211_sub_if_data *sdata = mpath->sdata;
+       struct hlist_node *n;
+       struct mesh_table *tbl;
+       struct mesh_path *from_mpath = mpath;
+       struct mpath_node *gate = NULL;
+       bool copy = false;
+       struct hlist_head *known_gates;
+
+       rcu_read_lock();
+       tbl = rcu_dereference(mesh_paths);
+       known_gates = tbl->known_gates;
+       rcu_read_unlock();
+
+       if (!known_gates)
+               return -EHOSTUNREACH;
+
+       hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+               if (gate->mpath->sdata != sdata)
+                       continue;
+
+               if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+                       mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+                       mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+                       from_mpath = gate->mpath;
+                       copy = true;
+               } else {
+                       mpath_dbg("Not forwarding %p\n", gate->mpath);
+                       mpath_dbg("flags %x\n", gate->mpath->flags);
+               }
+       }
+
+       hlist_for_each_entry_rcu(gate, n, known_gates, list)
+               if (gate->mpath->sdata == sdata) {
+                       mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+                       mesh_path_tx_pending(gate->mpath);
+               }
+
+       return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
 /**
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;
+       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
 
        if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
                u8 *ra, *da;
 
                da = hdr->addr3;
                ra = hdr->addr1;
+               rcu_read_lock();
                mpath = mesh_path_lookup(da, sdata);
-               if (mpath)
+               if (mpath) {
+                       spin_lock_bh(&mpath->state_lock);
                        sn = ++mpath->sn;
+                       spin_unlock_bh(&mpath->state_lock);
+               }
+               rcu_read_unlock();
                mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-                                  cpu_to_le32(sn),
-                                  cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
+                                  cpu_to_le32(sn), reason, ra, sdata);
        }
 
        kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
 {
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&mpath->frame_queue)) &&
-                       (mpath->flags & MESH_PATH_ACTIVE))
+       while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
 }
 
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
        struct mesh_table *tbl_path, *tbl_mpp;
+       int ret;
 
        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+       tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+       if (!tbl_path->known_gates) {
+               ret = -ENOMEM;
+               goto free_path;
+       }
+       INIT_HLIST_HEAD(tbl_path->known_gates);
+
 
        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
-               mesh_table_free(tbl_path, true);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+       tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+       if (!tbl_mpp->known_gates) {
+               ret = -ENOMEM;
+               goto free_mpp;
+       }
+       INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);
 
        return 0;
+
+free_mpp:
+       mesh_table_free(tbl_mpp, true);
+free_path:
+       mesh_table_free(tbl_path, true);
+       return ret;
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
-               spin_lock_bh(&mpath->state_lock);
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
-                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
-                       spin_unlock_bh(&mpath->state_lock);
+                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
-               } else
-                       spin_unlock_bh(&mpath->state_lock);
        }
        rcu_read_unlock();
 }