#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expects skb->data to point to the mac
		 * header. So change skb->data before calling it and change
		 * it back to the original position later.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;	/* the tag has been consumed */

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to the upper
		 * device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep(upper_dev, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
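
/*
 * Usage sketch (illustrative only): this helper must run under
 * rcu_read_lock, so a lookup from packet-processing context might look
 * like the following; "real_dev" and "vid" are hypothetical caller
 * variables.
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep(real_dev, vid);
 *	if (vlan_dev)
 *		...use vlan_dev while still inside the RCU section...
 *	rcu_read_unlock();
 */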

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
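
/*
 * Usage sketch (illustrative only): the two accessors above are commonly
 * paired to recover the lower device and tag of a VLAN netdev; the
 * is_vlan_dev() check guards against calling them on a non-VLAN device.
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *lower = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *		...
 *	}
 */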

static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	/* move the src/dst MAC pair up over the stripped 4-byte tag */
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(vlan_untag);
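
/*
 * Usage sketch (illustrative only): callers whose hardware did not strip
 * the tag untag early in the RX path, in the style of
 * __netif_receive_skb(); "out" is a hypothetical error label. On failure
 * vlan_untag() has already freed the skb.
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 *		skb = vlan_untag(skb);
 *		if (unlikely(!skb))
 *			goto out;
 *	}
 */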

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);

	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
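
/*
 * Usage sketch (illustrative only): vlan_vid_add() and vlan_vid_del()
 * (below) form a refcounted pair and require RTNL. A hypothetical caller
 * programming a hardware filter for VID 100 could do:
 *
 *	rtnl_lock();
 *	err = vlan_vid_add(real_dev, 100);
 *	...
 *	vlan_vid_del(real_dev, 100);
 *	rtnl_unlock();
 */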

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if (dev->features & NETIF_F_HW_VLAN_FILTER) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
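
/*
 * Usage sketch (illustrative only): master devices such as bonding use
 * this pair to copy the master's VIDs onto a newly enslaved lower device
 * and to drop them again on release; "bond_dev" and "slave_dev" are
 * hypothetical names.
 *
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);
 */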

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
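
/*
 * Usage sketch (illustrative only, RTNL held): a driver that must not
 * change state while VLAN devices sit on top of it could guard with:
 *
 *	if (vlan_uses_dev(dev))
 *		return -EBUSY;
 */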