return;
}
- if ((atomic_read(&bat_priv->aggregation_enabled)) &&
+ if ((atomic_read(&bat_priv->aggregated_ogms)) &&
(packet_len < MAX_AGGREGATION_BYTES))
forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
sizeof(struct ethhdr));
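(Sketch, not part of the patch: the sizing rule the hunk above applies. When OGM aggregation is enabled and the first packet leaves room below MAX_AGGREGATION_BYTES, the buffer is opened at the full aggregation size so later OGMs can be appended in place. The helper name is hypothetical, and the fallback size for the non-aggregating case is an assumption, since that else branch lies outside the hunk.)

static inline unsigned int aggr_skb_size(struct bat_priv *bat_priv,
                                         int packet_len)
{
        /* aggregation on and room left: allocate the maximum up front */
        if (atomic_read(&bat_priv->aggregated_ogms) &&
            (packet_len < MAX_AGGREGATION_BYTES))
                return MAX_AGGREGATION_BYTES + sizeof(struct ethhdr);

        /* assumed fallback: one packet plus an ethernet header */
        return packet_len + sizeof(struct ethhdr);
}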
/* find position for the packet in the forward queue */
spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
- if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
+ if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
if (can_aggregate_with(batman_packet,
* later on
*/
if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregation_enabled)))
+ (atomic_read(&bat_priv->aggregated_ogms)))
send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
new_aggregated_packet(packet_buff, packet_len,
{
struct device *dev = to_dev(kobj->parent);
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
+ int aggr_status = atomic_read(&bat_priv->aggregated_ogms);
return sprintf(buff, "%s\n",
aggr_status == 0 ? "disabled" : "enabled");
return -EINVAL;
}
- if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
+ if (atomic_read(&bat_priv->aggregated_ogms) == aggr_tmp)
return count;
bat_info(net_dev, "Changing aggregation from: %s to: %s\n",
- atomic_read(&bat_priv->aggregation_enabled) == 1 ?
+ atomic_read(&bat_priv->aggregated_ogms) == 1 ?
"enabled" : "disabled", aggr_tmp == 1 ? "enabled" :
"disabled");
- atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
+ atomic_set(&bat_priv->aggregated_ogms, (unsigned)aggr_tmp);
return count;
}
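(Sketch, not part of the patch: the same show/store pattern repeats below for bonding and fragmentation, with only the atomic_t member and the log text changing. The helper name store_bool_attr is hypothetical, and the enable value is assumed to be already parsed from the sysfs input before this point.)

static ssize_t store_bool_attr(struct net_device *net_dev, atomic_t *attr,
                               const char *name, int enable, size_t count)
{
        /* no change requested: report success without logging */
        if (atomic_read(attr) == enable)
                return count;

        bat_info(net_dev, "Changing %s from: %s to: %s\n", name,
                 atomic_read(attr) == 1 ? "enabled" : "disabled",
                 enable == 1 ? "enabled" : "disabled");

        atomic_set(attr, (unsigned)enable);
        return count;
}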
{
struct device *dev = to_dev(kobj->parent);
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int bond_status = atomic_read(&bat_priv->bonding_enabled);
+ int bond_status = atomic_read(&bat_priv->bonding);
return sprintf(buff, "%s\n",
bond_status == 0 ? "disabled" : "enabled");
return -EINVAL;
}
- if (atomic_read(&bat_priv->bonding_enabled) == bonding_enabled_tmp)
+ if (atomic_read(&bat_priv->bonding) == bonding_enabled_tmp)
return count;
bat_info(net_dev, "Changing bonding from: %s to: %s\n",
- atomic_read(&bat_priv->bonding_enabled) == 1 ?
+ atomic_read(&bat_priv->bonding) == 1 ?
"enabled" : "disabled",
bonding_enabled_tmp == 1 ? "enabled" : "disabled");
- atomic_set(&bat_priv->bonding_enabled, (unsigned)bonding_enabled_tmp);
+ atomic_set(&bat_priv->bonding, (unsigned)bonding_enabled_tmp);
return count;
}
{
struct device *dev = to_dev(kobj->parent);
struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
- int frag_status = atomic_read(&bat_priv->frag_enabled);
+ int frag_status = atomic_read(&bat_priv->fragmentation);
return sprintf(buff, "%s\n",
frag_status == 0 ? "disabled" : "enabled");
return -EINVAL;
}
- if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
+ if (atomic_read(&bat_priv->fragmentation) == frag_enabled_tmp)
return count;
bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
- atomic_read(&bat_priv->frag_enabled) == 1 ?
+ atomic_read(&bat_priv->fragmentation) == 1 ?
"enabled" : "disabled",
frag_enabled_tmp == 1 ? "enabled" : "disabled");
- atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
+ atomic_set(&bat_priv->fragmentation, (unsigned)frag_enabled_tmp);
update_min_mtu(net_dev);
return count;
}
* (have MTU > 1500 + BAT_HEADER_LEN) */
int min_mtu = ETH_DATA_LEN;
- if (atomic_read(&bat_priv->frag_enabled))
+ if (atomic_read(&bat_priv->fragmentation))
goto out;
rcu_read_lock();
bat_info(batman_if->soft_iface, "Adding interface: %s\n",
batman_if->net_dev->name);
- if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+ if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
bat_info(batman_if->soft_iface,
"The MTU of interface %s is too small (%i) to handle "
batman_if->net_dev->name, batman_if->net_dev->mtu,
ETH_DATA_LEN + BAT_HEADER_LEN);
- if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+ if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
ETH_DATA_LEN + BAT_HEADER_LEN)
bat_info(batman_if->soft_iface,
"The MTU of interface %s is too small (%i) to handle "
/* without bonding, the first node should
* always choose the default router. */
- bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
+ bonding_enabled = atomic_read(&bat_priv->bonding);
if ((!recv_if) && (!bonding_enabled))
return orig_node->router;
unicast_packet = (struct unicast_packet *)skb->data;
if (unicast_packet->packet_type == BAT_UNICAST &&
- atomic_read(&bat_priv->frag_enabled) &&
+ atomic_read(&bat_priv->fragmentation) &&
skb->len > batman_if->net_dev->mtu)
return frag_send_skb(skb, bat_priv, batman_if,
dstaddr);
bat_priv = netdev_priv(soft_iface);
- atomic_set(&bat_priv->aggregation_enabled, 1);
- atomic_set(&bat_priv->bonding_enabled, 0);
+ atomic_set(&bat_priv->aggregated_ogms, 1);
+ atomic_set(&bat_priv->bonding, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->log_level, 0);
- atomic_set(&bat_priv->frag_enabled, 1);
+ atomic_set(&bat_priv->fragmentation, 1);
atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
required_bytes += BAT_PACKET_LEN;
if ((required_bytes > ETH_DATA_LEN) ||
- (atomic_read(&bat_priv->aggregation_enabled) &&
+ (atomic_read(&bat_priv->aggregated_ogms) &&
required_bytes > MAX_AGGREGATION_BYTES) ||
(bat_priv->num_local_hna + 1 > 255)) {
bat_dbg(DBG_ROUTES, bat_priv,
struct bat_priv {
atomic_t mesh_state;
struct net_device_stats stats;
- atomic_t aggregation_enabled;
- atomic_t bonding_enabled;
- atomic_t frag_enabled;
- atomic_t vis_mode;
- atomic_t orig_interval;
- atomic_t log_level;
+ atomic_t aggregated_ogms; /* boolean */
+ atomic_t bonding; /* boolean */
+ atomic_t fragmentation; /* boolean */
+ atomic_t vis_mode; /* VIS_TYPE_* */
+ atomic_t orig_interval; /* uint */
+ atomic_t log_level; /* uint */
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
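(Sketch, not part of the patch: the renamed members keep the plain atomic_read()/atomic_set() access pattern used throughout the patch; the annotations mark which ones are simple on/off knobs. The helper below is hypothetical and only illustrates the read side for one of the boolean members.)

static inline int ogm_aggregation_on(struct bat_priv *bat_priv)
{
        /* the knob is a boolean: any non-zero value means "enabled" */
        return atomic_read(&bat_priv->aggregated_ogms) != 0;
}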
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
- if (atomic_read(&bat_priv->frag_enabled) &&
+ if (atomic_read(&bat_priv->fragmentation) &&
data_len + sizeof(struct unicast_packet) >
batman_if->net_dev->mtu) {
/* send frag skb decreases ttl */
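(Sketch, not part of the patch: the size test applied in this hunk; the routing hunk above makes the same decision on an already-built skb by comparing skb->len against the MTU. The hypothetical helper frag_needed() mirrors the condition: fragmentation must be enabled and the encapsulated unicast frame must exceed the outgoing interface's MTU.)

static inline int frag_needed(struct bat_priv *bat_priv, int data_len,
                              struct batman_if *batman_if)
{
        /* fragment only if the knob is on and the frame would not fit */
        return atomic_read(&bat_priv->fragmentation) &&
               (data_len + sizeof(struct unicast_packet) >
                batman_if->net_dev->mtu);
}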