batman-adv: Add missing hardif_free_ref in forw_packet_free
author: Sven Eckelmann <sven@narfation.org>
Wed, 11 May 2011 18:59:06 +0000 (20:59 +0200)
committer: Sven Eckelmann <sven@narfation.org>
Sat, 14 May 2011 22:02:06 +0000 (00:02 +0200)
add_bcast_packet_to_list increases the refcount for if_incoming but the
reference count is never decreased. The reference count must be
increased for all kinds of forwarded packets which have the primary
interface stored and forw_packet_free must decrease them. Also
purge_outstanding_packets has to invoke forw_packet_free when a work
item was really cancelled.

This regression was introduced in
32ae9b221e788413ce68feaae2ca39e406211a0a.

Reported-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
net/batman-adv/aggregation.c
net/batman-adv/send.c

index 9b945902447946fbf9bf2cd3b1c0a396ed3b6643..a8c32030527c7d7005160d12066fa3471caaa6fa 100644 (file)
@@ -23,6 +23,7 @@
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
+#include "hard-interface.h"
 
 /* calculate the size of the tt information for a given packet */
 static int tt_len(struct batman_packet *batman_packet)
@@ -105,12 +106,15 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
        struct forw_packet *forw_packet_aggr;
        unsigned char *skb_buff;
 
+       if (!atomic_inc_not_zero(&if_incoming->refcount))
+               return;
+
        /* own packet should always be scheduled */
        if (!own_packet) {
                if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "batman packet queue full\n");
-                       return;
+                       goto out;
                }
        }
 
@@ -118,7 +122,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
        if (!forw_packet_aggr) {
                if (!own_packet)
                        atomic_inc(&bat_priv->batman_queue_left);
-               return;
+               goto out;
        }
 
        if ((atomic_read(&bat_priv->aggregated_ogms)) &&
@@ -133,7 +137,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
                if (!own_packet)
                        atomic_inc(&bat_priv->batman_queue_left);
                kfree(forw_packet_aggr);
-               return;
+               goto out;
        }
        skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
 
@@ -164,6 +168,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
        queue_delayed_work(bat_event_workqueue,
                           &forw_packet_aggr->delayed_work,
                           send_time - jiffies);
+
+       return;
+out:
+       hardif_free_ref(if_incoming);
 }
 
 /* aggregate a new packet into the existing aggregation */
index f30d0c69ccbb460f2215fe7208a79956e22e381c..76daa46efe19c61d97dfc9d5bde549bdb54c6e03 100644 (file)
@@ -377,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 {
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
+       if (forw_packet->if_incoming)
+               hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
 }
 
@@ -539,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
+       bool pending;
 
        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -567,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
-               cancel_delayed_work_sync(&forw_packet->delayed_work);
+               pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+               if (pending) {
+                       hlist_del(&forw_packet->list);
+                       forw_packet_free(forw_packet);
+               }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -591,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * send_outstanding_bat_packet() will lock the list to
                 * delete the item from the list
                 */
-               cancel_delayed_work_sync(&forw_packet->delayed_work);
+               pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+               if (pending) {
+                       hlist_del(&forw_packet->list);
+                       forw_packet_free(forw_packet);
+               }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }