return false;
}
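+/* atomic_add_unless() only performs the add when the counter differs from
+ * the "unless" value, so this returns non-zero only when the counter could
+ * actually be decremented (i.e. it was not already zero) */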
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
/* create a new aggregated packet and add this packet to it */
static void new_aggregated_packet(unsigned char *packet_buff,
int packet_len,
struct forw_packet *forw_packet_aggr;
unsigned long flags;
+ /* own packet should always be scheduled */
+ if (!own_packet) {
+ if (!atomic_dec_not_zero(&batman_queue_left)) {
+ bat_dbg(DBG_BATMAN, "batman packet queue full\n");
+ return;
+ }
+ }
+
forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
- if (!forw_packet_aggr)
+ if (!forw_packet_aggr) {
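+ /* give the reserved queue slot back if the allocation fails */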
+ if (!own_packet)
+ atomic_inc(&batman_queue_left);
return;
+ }
forw_packet_aggr->packet_buff = kmalloc(MAX_AGGREGATION_BYTES,
GFP_ATOMIC);
if (!forw_packet_aggr->packet_buff) {
+ if (!own_packet)
+ atomic_inc(&batman_queue_left);
kfree(forw_packet_aggr);
return;
}
DEFINE_SPINLOCK(forw_bcast_list_lock);
atomic_t vis_interval;
+atomic_t bcast_queue_left;
+atomic_t batman_queue_left;
+
int16_t num_hna;
struct net_device *soft_device;
atomic_set(&vis_interval, 1000);/* TODO: raise this later, this is only
* for debugging now. */
+ atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
+ atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
#define MODULE_ACTIVE 1
#define MODULE_DEACTIVATING 2
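+/* maximum number of queued broadcast / forwarded batman packets */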
+#define BCAST_QUEUE_LEN 256
+#define BATMAN_QUEUE_LEN 256
/*
* Debug Messages
extern spinlock_t forw_bcast_list_lock;
extern atomic_t vis_interval;
+extern atomic_t bcast_queue_left;
+extern atomic_t batman_queue_left;
extern int16_t num_hna;
extern struct net_device *soft_device;
send_time);
}
-void add_bcast_packet_to_list(struct sk_buff *skb)
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+/* add a broadcast packet to the queue and set up timers. broadcast packets
+ * are sent multiple times to increase the probability of being received.
+ *
+ * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
+ * errors.
+ *
+ * The skb is not consumed, so the caller should make sure that the
+ * skb is freed. */
+int add_bcast_packet_to_list(struct sk_buff *skb)
{
struct forw_packet *forw_packet;
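+ /* refuse the packet if no slot is left in the broadcast queue */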
+ if (!atomic_dec_not_zero(&bcast_queue_left)) {
+ bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
+ goto out;
+ }
+
forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+
if (!forw_packet)
- goto out;
+ goto out_and_inc;
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
forw_packet->num_packets = 0;
_add_bcast_packet_to_list(forw_packet, 1);
- return;
+ return NETDEV_TX_OK;
packet_free:
kfree(forw_packet);
+out_and_inc:
+ atomic_inc(&bcast_queue_left);
out:
- return;
+ return NETDEV_TX_BUSY;
}
void send_outstanding_bcast_packet(struct work_struct *work)
if ((forw_packet->num_packets < 3) &&
(atomic_read(&module_state) != MODULE_DEACTIVATING))
_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
- else
+ else {
forw_packet_free(forw_packet);
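+ /* the packet was already sent 3 times (or the module is shutting
+ * down), so the queue slot can be returned */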
+ atomic_inc(&bcast_queue_left);
+ }
}
void send_outstanding_bat_packet(struct work_struct *work)
(atomic_read(&module_state) != MODULE_DEACTIVATING))
schedule_own_packet(forw_packet->if_incoming);
+ /* don't count own packet */
+ if (!forw_packet->own)
+ atomic_inc(&batman_queue_left);
+
forw_packet_free(forw_packet);
}
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
-void add_bcast_packet_to_list(struct sk_buff *skb);
+int add_bcast_packet_to_list(struct sk_buff *skb);
void send_outstanding_bcast_packet(struct work_struct *work);
void send_outstanding_bat_packet(struct work_struct *work);
void purge_outstanding_packets(struct batman_if *batman_if);
/* set broadcast sequence number */
bcast_packet->seqno = htons(bcast_seqno);
- bcast_seqno++;
+ /* add the broadcast packet to the queue; only increase the seqno
+ * if it was accepted. */
+ if (add_bcast_packet_to_list(skb) == NETDEV_TX_OK)
+ bcast_seqno++;
- /* broadcast packet */
- add_bcast_packet_to_list(skb);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
kfree_skb(skb);