spin_unlock_bh(&orig_node->neigh_list_lock);
- frag_list_free(&orig_node->frag_list);
+ batadv_frag_list_free(&orig_node->frag_list);
batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
"originator timed out");
if (has_timed_out(orig_node->last_frag_packet,
FRAG_TIMEOUT))
- frag_list_free(&orig_node->frag_list);
+ batadv_frag_list_free(&orig_node->frag_list);
}
spin_unlock_bh(list_lock);
}
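/* Editor's sketch, not part of this patch: the purge above frees an
 * originator's fragment buffer once last_frag_packet is older than
 * FRAG_TIMEOUT milliseconds. A helper of roughly this shape (compare
 * has_timed_out() in batman-adv's main.h) does the jiffies comparison:
 */
#include <linux/jiffies.h>

static inline bool example_has_timed_out(unsigned long timestamp,
					 unsigned int timeout_ms)
{
	/* true once timeout_ms milliseconds have elapsed since timestamp */
	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout_ms));
}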
if (unicast_packet->header.packet_type == BAT_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
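/* Editor's sketch, not part of this patch: the condition above decides
 * whether a forwarded BAT_UNICAST frame must be split before it fits the
 * next hop's interface MTU. frag_must_fragment() below is a hypothetical
 * helper, shown only to spell out that test:
 */
static bool frag_must_fragment(struct bat_priv *bat_priv,
			       const struct sk_buff *skb,
			       const struct neigh_node *neigh_node)
{
	/* fragmentation enabled and the frame exceeds the outgoing MTU */
	return atomic_read(&bat_priv->fragmentation) &&
	       skb->len > neigh_node->if_incoming->net_dev->mtu;
}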
if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
goto out;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
return NET_RX_DROP;
goto dropped;
}
- ret = unicast_send_skb(skb, bat_priv);
+ ret = batadv_unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
}
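/* Editor's sketch, not part of this patch: callers of
 * batadv_frag_reassemble_skb() follow the contract quoted further down
 * (NET_RX_DROP on failure, *new_skb left NULL if the fragment was only
 * buffered, otherwise *new_skb is the merged packet). Roughly, in a
 * hypothetical receive helper:
 */
static int example_handle_frag(struct sk_buff *skb, struct bat_priv *bat_priv)
{
	struct sk_buff *new_skb = NULL;
	int ret;

	ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
	if (ret == NET_RX_DROP)
		return NET_RX_DROP;	/* skb is left intact; caller drops it */

	if (!new_skb)
		return NET_RX_SUCCESS;	/* first half buffered, wait for its peer */

	/* both halves merged into new_skb; the original skb was freed */
	return NET_RX_SUCCESS;
}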
for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
if (!tfp) {
- frag_list_free(head);
+ batadv_frag_list_free(head);
return -ENOMEM;
}
tfp->skb = NULL;
return NULL;
}
-void frag_list_free(struct list_head *head)
+void batadv_frag_list_free(struct list_head *head)
{
struct frag_packet_list_entry *pf, *tmp_pf;
* or the skb could be reassembled (skb_new will point to the new packet and
* skb was freed)
*/
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb)
+int batadv_frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ struct sk_buff **new_skb)
{
struct orig_node *orig_node;
struct frag_packet_list_entry *tmp_frag_entry;
return ret;
}
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[])
+int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ struct hard_iface *hard_iface, const uint8_t dstaddr[])
{
struct unicast_packet tmp_uc, *unicast_packet;
struct hard_iface *primary_if;
return ret;
}
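/* Editor's sketch, not part of this patch: conceptually,
 * batadv_frag_send_skb() splits the unicast payload into two halves,
 * wraps each in a unicast_frag_packet header sharing one sequence
 * number, and marks the trailing fragment as the "large tail" when the
 * payload length is odd. The arithmetic below is an illustration only,
 * not taken from the function body:
 */
struct example_frag_split {
	unsigned int head_data;	/* payload bytes carried by the head fragment */
	unsigned int tail_data;	/* payload bytes carried by the tail fragment */
	bool large_tail;	/* tail is one byte larger than the head */
};

static struct example_frag_split example_split(unsigned int skb_len,
					       unsigned int uc_hdr_len)
{
	struct example_frag_split s;
	unsigned int data_len = skb_len - uc_hdr_len;

	s.head_data = data_len / 2;
	s.tail_data = data_len - s.head_data;
	s.large_tail = data_len & 1;
	return s;
}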
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
+int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct unicast_packet *unicast_packet;
neigh_node->if_incoming->net_dev->mtu) {
/* send frag skb decreases ttl */
unicast_packet->header.ttl++;
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
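/* Editor's sketch, not part of this patch: as the "send frag skb
 * decreases ttl" comment above notes, batadv_frag_send_skb() decrements
 * the TTL while building the fragment headers, so the caller above
 * pre-increments it and the fragments leave with the TTL the packet
 * already carried. Hypothetical helper, for illustration only:
 */
static int example_send_fragmented(struct sk_buff *skb,
				   struct bat_priv *bat_priv,
				   struct neigh_node *neigh_node)
{
	struct unicast_packet *unicast_packet;

	unicast_packet = (struct unicast_packet *)skb->data;
	/* compensated again inside batadv_frag_send_skb() */
	unicast_packet->header.ttl++;

	return batadv_frag_send_skb(skb, bat_priv, neigh_node->if_incoming,
				    neigh_node->addr);
}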
#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[]);
+int batadv_frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ struct sk_buff **new_skb);
+void batadv_frag_list_free(struct list_head *head);
+int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
+int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ struct hard_iface *hard_iface,
+ const uint8_t dstaddr[]);
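/* Editor's sketch, not part of this patch: with the prototypes renamed
 * above, a transmit-path caller (compare the batadv_unicast_send_skb()
 * hunk earlier, which does "goto dropped_freed" on failure) would look
 * roughly like this; example_tx_unicast() is hypothetical:
 */
static int example_tx_unicast(struct sk_buff *skb, struct bat_priv *bat_priv)
{
	/* a non-zero return means the packet could not be sent; judging by
	 * the dropped_freed label in the earlier hunk, the skb has already
	 * been consumed in that case, so no kfree_skb() here */
	if (batadv_unicast_send_skb(skb, bat_priv) != 0)
		return NET_XMIT_DROP;

	return NET_XMIT_SUCCESS;
}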
static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
{