{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
- unsigned long flags;
unsigned char *skb_buff;
/* own packet should always be scheduled */
forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
struct batman_packet *batman_packet =
(struct batman_packet *)packet_buff;
bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
- unsigned long flags;
/* find position for the packet in the forward queue */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
/* own packets are not to be aggregated */
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
* suitable aggregation packet found */
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
/**
* if we could not aggregate this packet with one of the others
aggregate(forw_packet_aggr,
packet_buff, packet_len,
direct_link);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}
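This hunk shows the pattern the whole patch follows: each spin_lock_irqsave()/spin_unlock_irqrestore() pair becomes spin_lock_bh()/spin_unlock_bh(), and the now-unused "flags" local is deleted. The _bh variants only disable bottom halves (softirqs) on the local CPU, which is sufficient here because these locks are taken from process context and from the softirq receive path, never from hard-IRQ context. A minimal before/after sketch with hypothetical type and field names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_item {
            struct hlist_node list;
    };

    struct example_priv {
            spinlock_t list_lock;
            struct hlist_head list;
    };

    /* before: IRQ-safe locking, only required if the lock can also be
     * taken from hard-IRQ context */
    static void add_item_irqsave(struct example_priv *priv,
                                 struct example_item *item)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->list_lock, flags);
            hlist_add_head(&item->list, &priv->list);
            spin_unlock_irqrestore(&priv->list_lock, flags);
    }

    /* after: only process context and softirqs contend for this lock,
     * so disabling bottom halves suffices and "flags" disappears */
    static void add_item_bh(struct example_priv *priv,
                            struct example_item *item)
    {
            spin_lock_bh(&priv->list_lock);
            hlist_add_head(&item->list, &priv->list);
            spin_unlock_bh(&priv->list_lock);
    }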
va_list args;
static char debug_log_buf[256];
char *p;
- unsigned long flags;
if (!debug_log)
return 0;
- spin_lock_irqsave(&debug_log->lock, flags);
+ spin_lock_bh(&debug_log->lock);
va_start(args, fmt);
printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
fmt, args);
for (p = debug_log_buf; *p != 0; p++)
emit_log_char(debug_log, *p);
- spin_unlock_irqrestore(&debug_log->lock, flags);
+ spin_unlock_bh(&debug_log->lock);
wake_up(&debug_log->queue_wait);
struct debug_log *debug_log = bat_priv->debug_log;
int error, i = 0;
char c;
- unsigned long flags;
if ((file->f_flags & O_NONBLOCK) &&
!(debug_log->log_end - debug_log->log_start))
if (error)
return error;
- spin_lock_irqsave(&debug_log->lock, flags);
+ spin_lock_bh(&debug_log->lock);
while ((!error) && (i < count) &&
(debug_log->log_start != debug_log->log_end)) {
debug_log->log_start++;
- spin_unlock_irqrestore(&debug_log->lock, flags);
+ spin_unlock_bh(&debug_log->lock);
error = __put_user(c, buf);
- spin_lock_irqsave(&debug_log->lock, flags);
+ spin_lock_bh(&debug_log->lock);
buf++;
i++;
}
- spin_unlock_irqrestore(&debug_log->lock, flags);
+ spin_unlock_bh(&debug_log->lock);
if (!error)
return i;
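Note how the read loop above drops the lock around __put_user() and retakes it afterwards: copying to user space may fault and sleep, which is forbidden under any spinlock, _bh variant or not. The conversion keeps that unlock/relock dance intact. A sketch of the pattern with a hypothetical log ring:

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct log_ring {
            spinlock_t lock;
            char buf[256];
            size_t start, end;
    };

    static ssize_t copy_one_char(struct log_ring *ring, char __user *buf)
    {
            char c;

            spin_lock_bh(&ring->lock);
            c = ring->buf[ring->start % sizeof(ring->buf)];
            ring->start++;
            spin_unlock_bh(&ring->lock);    /* __put_user() may sleep */

            if (__put_user(c, buf))
                    return -EFAULT;

            return 1;
    }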
struct socket_client *socket_client = file->private_data;
struct socket_packet *socket_packet;
struct list_head *list_pos, *list_pos_tmp;
- unsigned long flags;
- spin_lock_irqsave(&socket_client->lock, flags);
+ spin_lock_bh(&socket_client->lock);
/* for all packets in the queue ... */
list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
}
socket_client_hash[socket_client->index] = NULL;
- spin_unlock_irqrestore(&socket_client->lock, flags);
+ spin_unlock_bh(&socket_client->lock);
kfree(socket_client);
dec_module_count();
struct socket_packet *socket_packet;
size_t packet_len;
int error;
- unsigned long flags;
if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
return -EAGAIN;
if (error)
return error;
- spin_lock_irqsave(&socket_client->lock, flags);
+ spin_lock_bh(&socket_client->lock);
socket_packet = list_first_entry(&socket_client->queue_list,
struct socket_packet, list);
list_del(&socket_packet->list);
socket_client->queue_len--;
- spin_unlock_irqrestore(&socket_client->lock, flags);
+ spin_unlock_bh(&socket_client->lock);
error = __copy_to_user(buf, &socket_packet->icmp_packet,
socket_packet->icmp_len);
struct batman_if *batman_if;
size_t packet_len = sizeof(struct icmp_packet);
uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
if (len < sizeof(struct icmp_packet)) {
bat_dbg(DBG_BATMAN, bat_priv,
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dst_unreach;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
compare_orig, choose_orig,
icmp_packet->dst));
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
if (!batman_if)
goto dst_unreach;
goto out;
unlock:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
dst_unreach:
icmp_packet->msg_type = DESTINATION_UNREACHABLE;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
size_t icmp_len)
{
struct socket_packet *socket_packet;
- unsigned long flags;
socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
socket_packet->icmp_len = icmp_len;
- spin_lock_irqsave(&socket_client->lock, flags);
+ spin_lock_bh(&socket_client->lock);
/* while waiting for the lock the socket_client could have been
* deleted */
if (!socket_client_hash[icmp_packet->uid]) {
- spin_unlock_irqrestore(&socket_client->lock, flags);
+ spin_unlock_bh(&socket_client->lock);
kfree(socket_packet);
return;
}
socket_client->queue_len--;
}
- spin_unlock_irqrestore(&socket_client->lock, flags);
+ spin_unlock_bh(&socket_client->lock);
wake_up(&socket_client->queue_wait);
}
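The comment in this hunk ("while waiting for the lock the socket_client could have been deleted") names another invariant the conversion preserves: any state checked before acquiring a lock must be rechecked once the lock is held. A sketch with hypothetical client and packet types:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct client {
            spinlock_t lock;
            struct list_head queue;
    };

    struct queued_packet {
            struct list_head list;
    };

    static void queue_packet(struct client **hash, struct client *client,
                             struct queued_packet *packet, unsigned int uid)
    {
            spin_lock_bh(&client->lock);

            /* the client may have been torn down while we waited */
            if (!hash[uid]) {
                    spin_unlock_bh(&client->lock);
                    kfree(packet);
                    return;
            }

            list_add_tail(&packet->list, &client->queue);
            spin_unlock_bh(&client->lock);
    }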
int originator_init(struct bat_priv *bat_priv)
{
- unsigned long flags;
if (bat_priv->orig_hash)
return 1;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
bat_priv->orig_hash = hash_new(128);
if (!bat_priv->orig_hash)
goto err;
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
start_purge_timer(bat_priv);
return 1;
err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;
}
void originator_free(struct bat_priv *bat_priv)
{
- unsigned long flags;
-
if (!bat_priv->orig_hash)
return;
cancel_delayed_work_sync(&bat_priv->orig_work);
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
bat_priv->orig_hash = NULL;
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
}
/* this function finds or creates an originator entry for the given
HASHIT(hashit);
struct element_t *bucket;
struct orig_node *orig_node;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
/* for all origins... */
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
frag_list_free(&orig_node->frag_list);
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
softif_neigh_purge(bat_priv);
}
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
- unsigned long flags;
if ((!bat_priv->primary_if) ||
(bat_priv->primary_if->if_status != IF_ACTIVE)) {
"Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
"outgoingIF", "Potential nexthops");
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
bucket = hlist_entry(hashit.walk, struct element_t, hlist);
batman_count++;
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
if (batman_count == 0)
seq_printf(seq, "No batman nodes in range ...\n");
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct orig_node *orig_node;
- unsigned long flags;
HASHIT(hashit);
struct element_t *bucket;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
bucket = hlist_entry(hashit.walk, struct element_t, hlist);
goto err;
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;
err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return -ENOMEM;
}
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
- unsigned long flags;
HASHIT(hashit);
struct element_t *bucket;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
bucket = hlist_entry(hashit.walk, struct element_t, hlist);
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;
err:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return -ENOMEM;
}
struct element_t *bucket;
struct orig_node *orig_node;
TYPE_OF_WORD *word;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
while (hash_iterate(bat_priv->orig_hash, &hashit)) {
bucket = hlist_entry(hashit.walk, struct element_t, hlist);
bit_packet_count(word);
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
}
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
{
struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct ethhdr *ethhdr;
- unsigned long flags;
/* drop packet if it doesn't have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
receive_aggr_bat_packet(ethhdr,
skb->data,
skb_headlen(skb),
batman_if);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
kfree_skb(skb);
return NET_RX_SUCCESS;
struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
- unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet_rr *)skb->data;
/* answer echo request (ping) */
/* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
compare_orig, choose_orig,
icmp_packet->orig));
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return ret;
}
struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
- unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet *)skb->data;
return NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
icmp_packet->orig));
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return ret;
}
struct batman_if *batman_if;
int hdr_size = sizeof(struct icmp_packet);
int ret;
- unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
/**
ret = NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
icmp_packet->dst));
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return ret;
}
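The receive paths above (and the unicast forwarding below) all share one shape, flagged by the "copy the required data before sending" comments: look up the route under orig_hash_lock, copy the outgoing interface and next-hop MAC into locals, drop the lock, and only then touch the skb and transmit. A sketch of that shape with a hypothetical route structure (struct batman_if and send_skb_packet() are the driver's own, as used elsewhere in this patch):

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct batman_if;
    int send_skb_packet(struct sk_buff *skb, struct batman_if *batman_if,
                        uint8_t *dst_addr);        /* from the driver */

    struct route {
            spinlock_t lock;
            struct batman_if *out_if;
            uint8_t next_hop[ETH_ALEN];
    };

    static void send_via_route(struct route *route, struct sk_buff *skb)
    {
            struct batman_if *batman_if;
            uint8_t dstaddr[ETH_ALEN];

            spin_lock_bh(&route->lock);
            batman_if = route->out_if;
            memcpy(dstaddr, route->next_hop, ETH_ALEN);
            spin_unlock_bh(&route->lock);

            /* transmitting under a spinlock is not allowed */
            send_skb_packet(skb, batman_if, dstaddr);
    }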
struct neigh_node *router;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
struct unicast_packet *unicast_packet;
struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
int ret;
}
/* get routing information */
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
unicast_packet->dest));
router = find_router(bat_priv, orig_node, recv_if);
if (!router) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
struct ethhdr *ethhdr;
int hdr_size = sizeof(struct bcast_packet);
int32_t seq_diff;
- unsigned long flags;
/* drop packet if it doesn't have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
if (bcast_packet->ttl < 2)
return NET_RX_DROP;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
bcast_packet->orig));
if (orig_node == NULL) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
ntohl(bcast_packet->seqno))) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
/* check whether the packet is old and the host just restarted. */
if (window_protected(bat_priv, seq_diff,
&orig_node->bcast_seqno_reset)) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
/* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb);
struct forw_packet *forw_packet,
unsigned long send_time)
{
- unsigned long flags;
INIT_HLIST_NODE(&forw_packet->list);
/* add new packet to packet list */
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
- unsigned long flags;
struct sk_buff *skb1;
struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
struct bat_priv *bat_priv = netdev_priv(soft_iface);
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
container_of(work, struct delayed_work, work);
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
- unsigned long flags;
struct bat_priv *bat_priv;
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
- unsigned long flags;
if (batman_if)
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets()\n");
/* free bcast list */
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bcast_list, list) {
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/**
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bcast_list_lock);
}
- spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/* free batman packet list */
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bat_list, list) {
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
}
- spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
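The comment blocks inside purge_outstanding_packets() explain the unlock/relock around cancel_delayed_work_sync(): the work handler itself takes the same forw_*_list_lock to unlink the packet, so waiting for it with the lock held would deadlock. A sketch of the pattern with hypothetical list and item types:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct sched_item {
            struct hlist_node list;
            struct delayed_work delayed_work;
    };

    struct sched_list {
            spinlock_t lock;
            struct hlist_head head;
    };

    static void purge_list(struct sched_list *sched)
    {
            struct sched_item *item;
            struct hlist_node *node, *node_safe;

            spin_lock_bh(&sched->lock);
            hlist_for_each_entry_safe(item, node, node_safe,
                                      &sched->head, list) {
                    spin_unlock_bh(&sched->lock);

                    /* the work handler locks sched->lock to delete
                     * the item; holding it here would deadlock */
                    cancel_delayed_work_sync(&item->delayed_work);

                    spin_lock_bh(&sched->lock);
            }
            spin_unlock_bh(&sched->lock);
    }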
{
struct softif_neigh *softif_neigh, *softif_neigh_tmp;
struct hlist_node *node, *node_tmp;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->softif_neigh_lock, flags);
+ spin_lock_bh(&bat_priv->softif_neigh_lock);
hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
&bat_priv->softif_neigh_list, list) {
call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
}
- spin_unlock_irqrestore(&bat_priv->softif_neigh_lock, flags);
+ spin_unlock_bh(&bat_priv->softif_neigh_lock);
}
static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
{
struct softif_neigh *softif_neigh;
struct hlist_node *node;
- unsigned long flags;
rcu_read_lock();
hlist_for_each_entry_rcu(softif_neigh, node,
kref_init(&softif_neigh->refcount);
INIT_HLIST_NODE(&softif_neigh->list);
- spin_lock_irqsave(&bat_priv->softif_neigh_lock, flags);
+ spin_lock_bh(&bat_priv->softif_neigh_lock);
hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
- spin_unlock_irqrestore(&bat_priv->softif_neigh_lock, flags);
+ spin_unlock_bh(&bat_priv->softif_neigh_lock);
found:
kref_get(&softif_neigh->refcount);
struct hna_local_entry *hna_local_entry;
struct hna_global_entry *hna_global_entry;
struct hashtable_t *swaphash;
- unsigned long flags;
int required_bytes;
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
hna_local_entry =
((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
compare_orig, choose_orig,
addr));
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
if (hna_local_entry) {
hna_local_entry->last_seen = jiffies;
else
hna_local_entry->never_purge = 0;
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
hna_local_entry);
bat_priv->hna_local_hash = swaphash;
}
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
/* remove address from global hash if present */
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
hna_global_entry = ((struct hna_global_entry *)
hash_find(bat_priv->hna_global_hash,
_hna_global_del_orig(bat_priv, hna_global_entry,
"local hna received");
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
int hna_local_fill_buffer(struct bat_priv *bat_priv,
struct element_t *bucket;
HASHIT(hashit);
int i = 0;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
if (i == bat_priv->num_local_hna)
atomic_set(&bat_priv->hna_local_changed, 0);
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
return i;
}
HASHIT(hashit);
HASHIT(hashit_count);
struct element_t *bucket;
- unsigned long flags;
size_t buf_size, pos;
char *buff;
"announced via HNA:\n",
net_dev->name);
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
return -ENOMEM;
}
buff[0] = '\0';
hna_local_entry->addr);
}
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
seq_printf(seq, "%s", buff);
kfree(buff);
uint8_t *addr, char *message)
{
struct hna_local_entry *hna_local_entry;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
hna_local_entry = (struct hna_local_entry *)
hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
if (hna_local_entry)
hna_local_del(bat_priv, hna_local_entry, message);
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
}
static void hna_local_purge(struct work_struct *work)
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
struct element_t *bucket;
- unsigned long flags;
unsigned long timeout;
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
bucket = hlist_entry(hashit.walk, struct element_t, hlist);
"address timed out");
}
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
hna_local_start_timer(bat_priv);
}
struct hna_local_entry *hna_local_entry;
struct hashtable_t *swaphash;
int hna_buff_count = 0;
- unsigned long flags;
unsigned char *hna_ptr;
while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_global_entry = (struct hna_global_entry *)
choose_orig, hna_ptr);
if (!hna_global_entry) {
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
- flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
hna_global_entry =
kmalloc(sizeof(struct hna_global_entry),
"%pM (via %pM)\n",
hna_global_entry->addr, orig_node->orig);
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
hash_add(bat_priv->hna_global_hash, compare_orig,
choose_orig, hna_global_entry);
}
hna_global_entry->orig_node = orig_node;
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
/* remove address from local hash if present */
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_local_entry = (struct hna_local_entry *)
hna_local_del(bat_priv, hna_local_entry,
"global hna received");
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
hna_buff_count++;
}
}
}
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
if (bat_priv->hna_global_hash->elements * 4 >
bat_priv->hna_global_hash->size) {
bat_priv->hna_global_hash = swaphash;
}
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
HASHIT(hashit);
HASHIT(hashit_count);
struct element_t *bucket;
- unsigned long flags;
size_t buf_size, pos;
char *buff;
seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
net_dev->name);
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
return -ENOMEM;
}
buff[0] = '\0';
hna_global_entry->orig_node->orig);
}
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
seq_printf(seq, "%s", buff);
kfree(buff);
{
struct hna_global_entry *hna_global_entry;
int hna_buff_count = 0;
- unsigned long flags;
unsigned char *hna_ptr;
if (orig_node->hna_buff_len == 0)
return;
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
hna_buff_count++;
}
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
orig_node->hna_buff_len = 0;
kfree(orig_node->hna_buff);
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
struct hna_global_entry *hna_global_entry;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_ghash_lock);
hna_global_entry = (struct hna_global_entry *)
hash_find(bat_priv->hna_global_hash,
compare_orig, choose_orig, addr);
- spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_ghash_lock);
if (!hna_global_entry)
return NULL;
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct sk_buff **new_skb)
{
- unsigned long flags;
struct orig_node *orig_node;
struct frag_packet_list_entry *tmp_frag_entry;
int ret = NET_RX_DROP;
(struct unicast_frag_packet *)skb->data;
*new_skb = NULL;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
unicast_packet->orig));
if (*new_skb)
ret = NET_RX_SUCCESS;
out:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return ret;
}
struct neigh_node *router;
int data_len = skb->len;
uint8_t dstaddr[ETH_ALEN];
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
/* get routing information */
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
if (batman_if->if_status != IF_ACTIVE)
goto dropped;
return 0;
unlock:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
dropped:
kfree_skb(skb);
return 1;
struct vis_info *info = container_of(ref, struct vis_info, refcount);
struct bat_priv *bat_priv = info->bat_priv;
struct recvlist_node *entry, *tmp;
- unsigned long flags;
list_del_init(&info->send_list);
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
+ spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_list_lock);
kfree_skb(info->skb_packet);
}
struct if_list_entry *entry;
struct hlist_node *pos, *n;
int i;
- unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
buf_size = 1;
/* Estimate length */
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
bucket = hlist_entry(hashit_count.walk, struct element_t,
hlist);
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
return -ENOMEM;
}
buff[0] = '\0';
}
}
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
seq_printf(seq, "%s", buff);
kfree(buff);
struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
- unsigned long flags;
entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
+ spin_lock_bh(&bat_priv->vis_list_lock);
list_add_tail(&entry->list, recv_list);
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_list_lock);
}
/* returns 1 if this mac is in the recv_list */
struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
+ spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry(entry, recv_list, list) {
if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
- spin_unlock_irqrestore(&bat_priv->vis_list_lock,
- flags);
+ spin_unlock_bh(&bat_priv->vis_list_lock);
return 1;
}
}
- spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_list_lock);
return 0;
}
{
struct vis_info *info;
int is_new, make_broadcast;
- unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
info = add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, make_broadcast);
if (!info)
if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
send_list_add(bat_priv, info);
end:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
struct vis_info *info;
struct vis_packet *packet;
int is_new;
- unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
int are_target = 0;
is_my_mac(vis_packet->target_orig))
are_target = 1;
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
info = add_packet(bat_priv, vis_packet, vis_info_len,
&is_new, are_target);
}
end:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
struct vis_info_entry *entry;
struct hna_local_entry *hna_local_entry;
int best_tq = -1;
- unsigned long flags;
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
packet->ttl = TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
best_tq = find_best_vis_server(bat_priv, info);
if (best_tq < 0) {
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
- flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return -1;
}
}
packet->entries++;
if (vis_packet_full(info)) {
- spin_unlock_irqrestore(
- &bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
return 0;
}
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
- spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ spin_lock_bh(&bat_priv->hna_lhash_lock);
while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
bucket = hlist_entry(hashit_local.walk, struct element_t,
hlist);
packet->entries++;
if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
- flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
return 0;
}
}
- spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ spin_unlock_bh(&bat_priv->hna_lhash_lock);
return 0;
}
struct orig_node *orig_node;
struct vis_packet *packet;
struct sk_buff *skb;
- unsigned long flags;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
packet = (struct vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
send_skb_packet(skb, batman_if, dstaddr);
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
}
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
}
static void unicast_vis_packet(struct bat_priv *bat_priv,
struct orig_node *orig_node;
struct sk_buff *skb;
struct vis_packet *packet;
- unsigned long flags;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ spin_lock_bh(&bat_priv->orig_hash_lock);
packet = (struct vis_packet *)info->skb_packet->data;
orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
compare_orig, choose_orig,
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
return;
out:
- spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->orig_hash_lock);
}
/* only send one vis packet. called from send_vis_packets() */
struct bat_priv *bat_priv =
container_of(delayed_work, struct bat_priv, vis_work);
struct vis_info *info, *temp;
- unsigned long flags;
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
purge_vis_packets(bat_priv);
if (generate_vis_packet(bat_priv) == 0) {
send_list) {
kref_get(&info->refcount);
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
if (bat_priv->primary_if)
send_vis_packet(bat_priv, info);
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
start_vis_timer(bat_priv);
}
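send_vis_packets() combines the two ideas above: it pins each info with kref_get() before releasing vis_hash_lock, so a concurrent purge cannot free the packet while it is being transmitted, then retakes the lock and drops the reference. A sketch with a hypothetical refcounted object and send helper:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct vis_obj {
            struct kref refcount;
            struct list_head send_list;
    };

    struct vis_table {
            spinlock_t lock;
            struct list_head send_list;
    };

    static void vis_obj_free(struct kref *ref)
    {
            kfree(container_of(ref, struct vis_obj, refcount));
    }

    static void transmit(struct vis_obj *obj);      /* hypothetical send */

    static void send_all(struct vis_table *table)
    {
            struct vis_obj *obj, *tmp;

            spin_lock_bh(&table->lock);
            list_for_each_entry_safe(obj, tmp, &table->send_list,
                                     send_list) {
                    kref_get(&obj->refcount);       /* pin across unlock */
                    spin_unlock_bh(&table->lock);

                    transmit(obj);

                    spin_lock_bh(&table->lock);
                    list_del_init(&obj->send_list);
                    kref_put(&obj->refcount, vis_obj_free);
            }
            spin_unlock_bh(&table->lock);
    }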
int vis_init(struct bat_priv *bat_priv)
{
struct vis_packet *packet;
- unsigned long flags;
int hash_added;
if (bat_priv->vis_hash)
return 1;
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
bat_priv->vis_hash = hash_new(256);
if (!bat_priv->vis_hash) {
goto err;
}
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
start_vis_timer(bat_priv);
return 1;
kfree(bat_priv->my_vis_info);
bat_priv->my_vis_info = NULL;
err:
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
vis_quit(bat_priv);
return 0;
}
/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
- unsigned long flags;
if (!bat_priv->vis_hash)
return;
cancel_delayed_work_sync(&bat_priv->vis_work);
- spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ spin_lock_bh(&bat_priv->vis_hash_lock);
/* properly remove, kill timers ... */
hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
bat_priv->vis_hash = NULL;
bat_priv->my_vis_info = NULL;
- spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */