spin_lock_irqsave(&cp->lock, flags);
cp->cpcmd &= ~RxVlanOn;
cpw16(CpCmd, cp->cpcmd);
- if (cp->vlgrp)
- cp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(cp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
local_irq_save(flags);
ace_mask_irq(dev);
-
- if (ap->vlgrp)
- ap->vlgrp->vlan_devices[vid] = NULL;
-
+ vlan_group_set_device(ap->vlgrp, vid, NULL);
ace_unmask_irq(dev);
local_irq_restore(flags);
}
{
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
- if (lp->vlgrp)
- lp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(lp->vlgrp, vid, NULL);
spin_unlock_irq(&lp->lock);
}
#endif
spin_lock_irqsave(&adapter->lock, flags);
/* atl1_irq_disable(adapter); */
- if (adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
/* atl1_irq_enable(adapter); */
spin_unlock_irqrestore(&adapter->lock, flags);
/* We don't do Vlan filtering */
if (adapter->vlgrp) {
u16 vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if (!adapter->vlgrp->vlan_devices[vid])
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
atl1_vlan_rx_add_vid(adapter->netdev, vid);
}
struct bnx2 *bp = netdev_priv(dev);
bnx2_netif_stop(bp);
-
- if (bp->vlgrp)
- bp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(bp->vlgrp, vid, NULL);
bnx2_set_rx_mode(dev);
bnx2_netif_start(bp);
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
- vlan_dev = bond->vlgrp->vlan_devices[vid];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
slave_dev->vlan_rx_kill_vid(slave_dev, vid);
- bond->vlgrp->vlan_devices[vid] = vlan_dev;
+ vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
}
}
/* Save and then restore vlan_dev in the grp array,
* since the slave's driver might clear it.
*/
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
- bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
+ vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
}
unreg:
vlan_id = 0;
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == rt->u.dst.dev) {
vlan_id = vlan->vlan_id;
dprintk("basa: vlan match on %s %d\n",
}
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan->vlan_ip) {
bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
vlan->vlan_ip, vlan->vlan_id);
list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
vlan_list) {
- vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+ vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
if (vlan_dev == event_dev) {
switch (event) {
case NETDEV_UP:
struct adapter *adapter = dev->priv;
spin_lock_irq(&adapter->async_lock);
- if (adapter->vlan_grp)
- adapter->vlan_grp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlan_grp, vid, NULL);
spin_unlock_irq(&adapter->async_lock);
}
#endif
int i;
for_each_port(adapter, i) {
- const struct vlan_group *grp;
+ struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
grp = p->vlan_grp;
- dev = grp ? grp->vlan_devices[vlan] : NULL;
+ dev = NULL;
+ if (grp)
+ dev = vlan_group_get_device(grp, vlan);
} else
while (dev->master)
dev = dev->master;
uint16_t vid = adapter->hw.mng_cookie.vlan_id;
uint16_t old_vid = adapter->mng_vlan_id;
if (adapter->vlgrp) {
- if (!adapter->vlgrp->vlan_devices[vid]) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid)) {
if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
e1000_vlan_rx_add_vid(netdev, vid);
if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
- !adapter->vlgrp->vlan_devices[old_vid])
+ !vlan_group_get_device(adapter->vlgrp, old_vid))
e1000_vlan_rx_kill_vid(netdev, old_vid);
} else
adapter->mng_vlan_id = vid;
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!(adapter->vlgrp &&
- adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
+ vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
uint32_t vfta, index;
e1000_irq_disable(adapter);
-
- if (adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
-
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
e1000_irq_enable(adapter);
if ((adapter->hw.mng_cookie.status &
if (adapter->vlgrp) {
uint16_t vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if (!adapter->vlgrp->vlan_devices[vid])
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int index;
u64 hret;
- if (port->vgrp)
- port->vgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(port->vgrp, vid, NULL);
cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
spin_lock_irqsave(&priv->rxlock, flags);
- if (priv->vlgrp)
- priv->vlgrp->vlan_devices[vid] = NULL;
+	vlan_group_set_device(priv->vlgrp, vid, NULL);
spin_unlock_irqrestore(&priv->rxlock, flags);
}
ixgb_irq_disable(adapter);
- if(adapter->vlgrp)
- adapter->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
ixgb_irq_enable(adapter);
if(adapter->vlgrp) {
uint16_t vid;
for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
- if(!adapter->vlgrp->vlan_devices[vid])
+ if(!vlan_group_get_device(adapter->vlgrp, vid))
continue;
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}
spin_lock_irq(&dev->misc_lock);
spin_lock(&dev->tx_lock);
- if (dev->vlgrp)
- dev->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(dev->vlgrp, vid, NULL);
spin_unlock(&dev->tx_lock);
spin_unlock_irq(&dev->misc_lock);
}
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_irqrestore(&tp->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&nic->tx_lock, flags);
- if (nic->vlgrp)
- nic->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(nic->vlgrp, vid, NULL);
spin_unlock_irqrestore(&nic->tx_lock, flags);
}
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
- if (sky2->vlgrp)
- sky2->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(sky2->vlgrp, vid, NULL);
netif_tx_unlock_bh(dev);
}
spin_lock(&np->lock);
if (debug > 1)
printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
- if (np->vlgrp)
- np->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(np->vlgrp, vid, NULL);
set_rx_mode(dev);
spin_unlock(&np->lock);
}
int vlan_count = 0;
void __iomem *filter_addr = ioaddr + HashTable + 8;
for (i = 0; i < VLAN_VID_MASK; i++) {
- if (np->vlgrp->vlan_devices[i]) {
+ if (vlan_group_get_device(np->vlgrp, i)) {
if (vlan_count >= 32)
break;
writew(cpu_to_be16(i), filter_addr);
tg3_netif_stop(tp);
tg3_full_lock(tp, 0);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
tg3_full_unlock(tp);
if (netif_running(dev))
{
struct typhoon *tp = netdev_priv(dev);
spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(tp->vlgrp, vid, NULL);
spin_unlock_bh(&tp->state_lock);
}
return rc;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
- if (vg->vlan_devices[i] == dev){
+		if (vlan_group_get_device(vg, i) == dev) {
rc = QETH_VLAN_CARD;
break;
}
QETH_DBF_TEXT(trace, 4, "frvaddr4");
rcu_read_lock();
- in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
+ in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
QETH_DBF_TEXT(trace, 4, "frvaddr6");
- in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
+ in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
if (!card->vlangrp)
return;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (card->vlangrp->vlan_devices[i] == NULL)
+ if (vlan_group_get_device(card->vlangrp, i) == NULL)
continue;
if (clear)
qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_free_vlan_addresses(card, vid);
- if (card->vlangrp)
- card->vlangrp->vlan_devices[vid] = NULL;
+ vlan_group_set_device(card->vlangrp, vid, NULL);
spin_unlock_irqrestore(&card->vlanlock, flags);
if (card->options.layer2)
qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (vg->vlan_devices[i] == NULL ||
- !(vg->vlan_devices[i]->flags & IFF_UP))
+ struct net_device *netdev = vlan_group_get_device(vg, i);
+ if (netdev == NULL ||
+ !(netdev->flags & IFF_UP))
continue;
- in_dev = in_dev_get(vg->vlan_devices[i]);
+ in_dev = in_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->mc_list_lock);
vg = card->vlangrp;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- if (vg->vlan_devices[i] == NULL ||
- !(vg->vlan_devices[i]->flags & IFF_UP))
+ struct net_device *netdev = vlan_group_get_device(vg, i);
+ if (netdev == NULL ||
+ !(netdev->flags & IFF_UP))
continue;
- in_dev = in6_dev_get(vg->vlan_devices[i]);
+ in_dev = in6_dev_get(netdev);
if (!in_dev)
continue;
read_lock(&in_dev->lock);
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
-#define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN / VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
struct hlist_node hlist; /* linked list */
- struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
+ struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
struct rcu_head rcu;
};
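+/* Caller must ensure vg is non-NULL; unlike the setter below, the
+ * getter does not check. */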
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+							int vlan_id)
+{
+ struct net_device **array;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
+}
+
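+/* Tolerates a NULL vg, which is why the driver hunks above could drop
+ * their own "if (x->vlgrp)" checks before setting. */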
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+ struct net_device *dev)
+{
+ struct net_device **array;
+ if (!vg)
+ return;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
struct vlan_priority_tci_mapping {
unsigned long priority;
unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
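With the defines above, VLAN_GROUP_ARRAY_PART_LEN works out to 4096 / 8 = 512, so the two helpers split a VLAN ID into a part index (vlan_id / 512) and a slot within that part (vlan_id % 512). A minimal standalone sketch of that arithmetic, reusing the same defines (illustrative only, not part of the patch):

#include <stdio.h>

#define VLAN_GROUP_ARRAY_LEN 4096
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN / VLAN_GROUP_ARRAY_SPLIT_PARTS)

int main(void)
{
	int vlan_id = 1537;	/* arbitrary example VID */

	/* 1537 / 512 = 3 and 1537 % 512 = 1, i.e. part 3, slot 1 */
	printf("vid %d -> part %d, slot %d\n", vlan_id,
	       vlan_id / VLAN_GROUP_ARRAY_PART_LEN,
	       vlan_id % VLAN_GROUP_ARRAY_PART_LEN);
	return 0;
}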
return NET_RX_DROP;
}
- skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
+ skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
if (skb->dev == NULL) {
dev_kfree_skb_any(skb);
struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
if (grp)
- return grp->vlan_devices[VID];
+ return vlan_group_get_device(grp, VID);
return NULL;
}
+static void vlan_group_free(struct vlan_group *grp)
+{
+ int i;
+
+	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+ kfree(grp->vlan_devices_arrays[i]);
+ kfree(grp);
+}
+
static void vlan_rcu_free(struct rcu_head *rcu)
{
- kfree(container_of(rcu, struct vlan_group, rcu));
+ vlan_group_free(container_of(rcu, struct vlan_group, rcu));
}
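vlan_rcu_free() remains the RCU callback here, so the per-part arrays are only released after a grace period. A hedged reader-side sketch of why that matters, assuming a real_dev and vid in scope (__vlan_find_group() is the hash lookup used a few hunks above); this is illustrative, not code from the patch:

	struct vlan_group *grp;
	struct net_device *dev;

	rcu_read_lock();
	grp = __vlan_find_group(real_dev->ifindex);
	dev = grp ? vlan_group_get_device(grp, vid) : NULL;
	if (dev)
		dev_hold(dev);	/* pin the device before leaving the read side */
	rcu_read_unlock();

A lookup done under rcu_read_lock() therefore cannot see the parts freed out from under it.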
ret = 0;
if (grp) {
- dev = grp->vlan_devices[vlan_id];
+ dev = vlan_group_get_device(grp, vlan_id);
if (dev) {
/* Remove proc entry */
vlan_proc_rem_dev(dev);
real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
}
- grp->vlan_devices[vlan_id] = NULL;
+ vlan_group_set_device(grp, vlan_id, NULL);
synchronize_net();
* group.
*/
for (i = 0; i < VLAN_VID_MASK; i++)
- if (grp->vlan_devices[i])
+ if (vlan_group_get_device(grp, i))
break;
if (i == VLAN_VID_MASK) {
struct net_device *new_dev;
struct net_device *real_dev; /* the ethernet device */
char name[IFNAMSIZ];
+ int i;
#ifdef VLAN_DEBUG
printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
if (!grp)
goto out_free_unregister;
+	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
+ grp->vlan_devices_arrays[i] = kzalloc(
+			sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN,
+ GFP_KERNEL);
+
+ if (!grp->vlan_devices_arrays[i])
+ goto out_free_arrays;
+ }
+
/* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
grp->real_dev_ifindex = real_dev->ifindex;
real_dev->vlan_rx_register(real_dev, grp);
}
- grp->vlan_devices[VLAN_ID] = new_dev;
+ vlan_group_set_device(grp, VLAN_ID, new_dev);
if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
#endif
return new_dev;
+out_free_arrays:
+ vlan_group_free(grp);
+
out_free_unregister:
unregister_netdev(new_dev);
goto out_unlock;
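For scale (my arithmetic, not stated in the patch): with 8-byte pointers the old flat vlan_devices[4096] array was 32KB, which together with the rest of struct vlan_group pushed kmalloc() into a multi-page, high-order allocation that can fail on a fragmented machine. Each of the eight parts allocated above is 512 pointers, exactly one 4KB page, which the allocator can almost always satisfy. A standalone sketch of the numbers:

#include <stdio.h>

int main(void)
{
	unsigned long flat = 4096 * sizeof(void *);	  /* old embedded array */
	unsigned long part = (4096 / 8) * sizeof(void *); /* one split part */

	/* On LP64: flat = 32768 bytes (8 pages), part = 4096 bytes (1 page) */
	printf("flat: %lu bytes, part: %lu bytes\n", flat, part);
	return 0;
}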
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
case NETDEV_DOWN:
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
int ret;
- vlandev = grp->vlan_devices[i];
+ vlandev = vlan_group_get_device(grp, i);
if (!vlandev)
continue;