 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	/* rebroadcast packet */
 	rcu_read_lock();
 	list_for_each_entry_rcu(batman_if, &if_list, list) {
 		/* send a copy of the saved skb */
 		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
 			send_skb_packet(skb1,
 				batman_if, broadcast_addr);
 	}
 	rcu_read_unlock();
 
 	forw_packet->num_packets++;
 
-	/* if we still have some more bcasts to send and we are not shutting
-	 * down */
-	if ((forw_packet->num_packets < 3) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	/* if we still have some more bcasts to send */
+	if (forw_packet->num_packets < 3) {
 		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
-	else {
-		forw_packet_free(forw_packet);
-		atomic_inc(&bcast_queue_left);
+		return;
 	}
+
+out:
+	forw_packet_free(forw_packet);
+	atomic_inc(&bcast_queue_left);
 }
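
This first hunk collapses two exit paths into one: the shutdown test moves to the top of the function and jumps to a shared out: label, so the requeue branch no longer has to re-check module_state. Below is a minimal, compilable user-space sketch of the same control-flow pattern; every identifier in it (module_deactivating, queue_slots, requeue_broadcast, and so on) is a stand-in invented for illustration, not the batman-adv API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool module_deactivating; /* stand-in for module_state */
static atomic_int queue_slots;          /* stand-in for bcast_queue_left */

struct packet {
	int num_packets; /* rebroadcasts sent so far */
};

/* stand-ins for the rcu rebroadcast loop, _add_bcast_packet_to_list(),
 * and forw_packet_free() */
static void rebroadcast(struct packet *p) { (void)p; }
static void requeue_broadcast(struct packet *p) { (void)p; }
static void free_packet(struct packet *p) { (void)p; }

/* post-patch shape: one shutdown check up front, one cleanup path */
static void send_outstanding_broadcast(struct packet *p)
{
	if (atomic_load(&module_deactivating))
		goto out;

	rebroadcast(p);
	p->num_packets++;

	/* if we still have some more bcasts to send */
	if (p->num_packets < 3) {
		requeue_broadcast(p); /* the 5 ms delay is elided here */
		return;               /* still queued: keep the slot */
	}

out:
	free_packet(p);
	atomic_fetch_add(&queue_slots, 1); /* hand the queue slot back */
}

int main(void)
{
	struct packet p = { .num_packets = 0 };

	send_outstanding_broadcast(&p);  /* requeued: num_packets == 1 */
	atomic_store(&module_deactivating, true);
	send_outstanding_broadcast(&p);  /* shutting down: freed via out */
	printf("queue slots returned: %d\n", atomic_load(&queue_slots));
	return 0;
}

The design point is that once cleanup lives under a single label, later changes to the teardown path only have to be made in one place.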
void send_outstanding_bat_packet(struct work_struct *work)

 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	send_packet(forw_packet);
 
 	/**
 	 * we have to have at least one packet in the queue
 	 * to determine the queues wake up time unless we are
 	 * shutting down
 	 */
-	if ((forw_packet->own) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	if (forw_packet->own)
 		schedule_own_packet(forw_packet->if_incoming);
 
+out:
 	/* don't count own packet */
 	if (!forw_packet->own)
 		atomic_inc(&batman_queue_left);
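
Applied to this second hunk, the same transformation leaves send_outstanding_bat_packet reading roughly as sketched below. Only the middle lines are taken from the diff above; the prologue that recovers forw_packet from the work item and the trailing forw_packet_free() are not shown in the hunk and are reconstructed here as assumptions about the surrounding driver, so treat this as a sketch rather than verbatim source.

void send_outstanding_bat_packet(struct work_struct *work)
{
	/* assumed prologue: recover the queued packet from the work item */
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	unsigned long flags;

	spin_lock_irqsave(&forw_bat_list_lock, flags);
	hlist_del(&forw_packet->list);
	spin_unlock_irqrestore(&forw_bat_list_lock, flags);

	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&batman_queue_left);

	/* assumed epilogue: both exit paths release the packet */
	forw_packet_free(forw_packet);
}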