/* own packet should always be scheduled */
if (!own_packet) {
- if (!atomic_dec_not_zero(&batman_queue_left)) {
+ if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
bat_dbg(DBG_BATMAN, bat_priv,
"batman packet queue full\n");
return;
		}
	}

forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet_aggr) {
if (!own_packet)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
return;
}
	forw_packet_aggr->packet_buff = kmalloc(MAX_AGGREGATION_BYTES,
						GFP_ATOMIC);
if (!forw_packet_aggr->packet_buff) {
if (!own_packet)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
kfree(forw_packet_aggr);
return;
}
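
For reference: atomic_dec_not_zero() is not a core kernel primitive but a
batman-adv helper built on atomic_add_unless(), so the queue-full test and
the decrement happen in one atomic step. A minimal sketch of the pattern,
assuming the helper as defined in batman-adv's main.h:

	/* main.h: decrement v unless it already hit 0;
	 * returns non-zero when the decrement actually happened */
	#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

	/* usage: reserve a queue slot before allocating,
	 * give the slot back on every error path */
	if (!atomic_dec_not_zero(&bat_priv->batman_queue_left))
		return;	/* queue full, drop the packet */
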
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->log_level, 0);
+ atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
+ atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
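
Each new soft interface now starts with its own full quota. The limits come
from main.h; the values below are my recollection of that header and should
be treated as an assumption, not as part of this patch:

	/* main.h -- compile-time queue limits (values assumed here) */
	#define BCAST_QUEUE_LEN		256
	#define BATMAN_QUEUE_LEN	256
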
DEFINE_SPINLOCK(forw_bat_list_lock);
DEFINE_SPINLOCK(forw_bcast_list_lock);
-atomic_t bcast_queue_left;
-atomic_t batman_queue_left;
-
int16_t num_hna;
struct net_device *soft_device;
atomic_set(&module_state, MODULE_INACTIVE);
- atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
- atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
-
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
bat_event_workqueue = create_singlethread_workqueue("bat_events");
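
The forwarding paths below put their delayed work on this single-threaded
queue. A minimal sketch of the lifecycle, using the standard workqueue API;
the error handling shown is illustrative, not taken from this patch:

	bat_event_workqueue = create_singlethread_workqueue("bat_events");
	if (!bat_event_workqueue)
		return -ENOMEM;

	/* e.g. in send.c: run forw_packet->delayed_work after send_time */
	queue_delayed_work(bat_event_workqueue,
			   &forw_packet->delayed_work, send_time);

	/* on module unload */
	destroy_workqueue(bat_event_workqueue);
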
extern spinlock_t forw_bat_list_lock;
extern spinlock_t forw_bcast_list_lock;
-extern atomic_t bcast_queue_left;
-extern atomic_t batman_queue_left;
extern int16_t num_hna;
extern struct net_device *soft_device;
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
- if (!atomic_dec_not_zero(&bcast_queue_left)) {
+ if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
goto out;
}
packet_free:
kfree(forw_packet);
out_and_inc:
- atomic_inc(&bcast_queue_left);
+ atomic_inc(&bat_priv->bcast_queue_left);
out:
return NETDEV_TX_BUSY;
}
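
The transmit path reserves a slot before allocating and hands it back on
every failure; only a successful enqueue keeps the slot until the worker
below releases it. A condensed sketch of that contract, reusing the labels
from the hunk above (the success return value is assumed):

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left))
		goto out;		/* nothing reserved, nothing to return */

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;	/* slot reserved, must be returned */
	/* ... enqueue and schedule; the worker gives the slot back ... */
	return NETDEV_TX_OK;

out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	return NETDEV_TX_BUSY;
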
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
struct sk_buff *skb1;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
out:
forw_packet_free(forw_packet);
- atomic_inc(&bcast_queue_left);
+ atomic_inc(&bat_priv->bcast_queue_left);
}
void send_outstanding_bat_packet(struct work_struct *work)
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
spin_lock_irqsave(&forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
out:
/* don't count own packet */
if (!forw_packet->own)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
forw_packet_free(forw_packet);
}
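
Both release sites must mirror the reserve site in aggregation.c: own OGMs
bypass the counter in both places, otherwise the quota drifts. A sketch that
makes the pairing explicit, with hypothetical helper names not present in
the patch:

	/* hypothetical helpers -- illustration only */
	static bool batman_slot_get(struct bat_priv *bat_priv, int own_packet)
	{
		/* own packets are always scheduled and never counted */
		if (own_packet)
			return true;
		return atomic_dec_not_zero(&bat_priv->batman_queue_left);
	}

	static void batman_slot_put(struct bat_priv *bat_priv, int own_packet)
	{
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
	}
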
atomic_t vis_mode;
atomic_t orig_interval;
atomic_t log_level;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
char num_ifaces;
struct debug_log *debug_log;
struct batman_if *primary_if;
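
With the counters inside struct bat_priv, every soft interface carries its
own quota; the FIXME comments above only note that soft_device is still a
single global until multiple soft interfaces exist. A minimal sketch of the
access pattern; the function name is made up for illustration:

	static int example_reserve_bcast_slot(struct net_device *dev)
	{
		/* private area allocated together with the net_device */
		struct bat_priv *bat_priv = netdev_priv(dev);

		return atomic_dec_not_zero(&bat_priv->bcast_queue_left);
	}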