vxlan_set_owner(dev, skb);
- /* See __IPTUNNEL_XMIT */
+ /* See iptunnel_xmit() */
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(iph, &rt->dst, NULL);
#include <linux/ip.h>
#include <linux/in6.h>
#include <uapi/linux/if_tunnel.h>
+#include <linux/u64_stats_sync.h>
+
+/* often modified stats are per-CPU, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
#endif /* _IF_TUNNEL_H_ */
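With the definition shared via if_tunnel.h, every tunnel driver allocates the same per-CPU block and relies on u64_stats_sync for tear-free 64-bit counters on 32-bit hosts. A minimal sketch of the allocate/free pattern follows; my_tunnel_init()/my_tunnel_uninit() are hypothetical hooks, only alloc_percpu()/free_percpu() are the real API:

#include <linux/netdevice.h>
#include <linux/if_tunnel.h>

/* Sketch: one pcpu_tstats instance per possible CPU, hung off dev->tstats. */
static int my_tunnel_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

static void my_tunnel_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}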
struct rcu_head rcu_head;
};
-#define __IPTUNNEL_XMIT(stats1, stats2) do { \
- int err; \
- int pkt_len = skb->len - skb_transport_offset(skb); \
- \
- skb->ip_summed = CHECKSUM_NONE; \
- ip_select_ident(iph, &rt->dst, NULL); \
- \
- err = ip_local_out(skb); \
- if (likely(net_xmit_eval(err) == 0)) { \
- u64_stats_update_begin(&(stats1)->syncp); \
- (stats1)->tx_bytes += pkt_len; \
- (stats1)->tx_packets++; \
- u64_stats_update_end(&(stats1)->syncp); \
- } else { \
- (stats2)->tx_errors++; \
- (stats2)->tx_aborted_errors++; \
- } \
-} while (0)
-
-#define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats)
+static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct iphdr *iph = ip_hdr(skb);
+ int pkt_len = skb->len - skb_transport_offset(skb);
+ struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
+
+ nf_reset(skb);
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(iph, skb_dst(skb), NULL);
+
+ err = ip_local_out(skb);
+ if (likely(net_xmit_eval(err) == 0)) {
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+}
#endif
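The macro's implicit dependence on caller-local variables named iph, rt, txq and stats is gone; callers now pass only the skb and the device. What a tunnel's ndo_start_xmit reduces to, roughly (a sketch assuming the includes from the previous example; my_build_outer_header() is a hypothetical helper, not part of this patch):

/* Sketch of a caller: build the outer IPv4 header and attach the route,
 * then let iptunnel_xmit() handle nf_reset(), checksum state, IP ident
 * selection, ip_local_out() and the per-CPU byte/packet accounting.
 */
static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_build_outer_header(skb, dev) < 0) {	/* hypothetical */
		dev->stats.tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;
}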
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
const struct iphdr *old_iph = ip_hdr(skb);
const struct iphdr *tiph;
struct flowi4 fl4;
}
}
- nf_reset(skb);
- tstats = this_cpu_ptr(dev->tstats);
- __IPTUNNEL_XMIT(tstats, &dev->stats);
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
#if IS_ENABLED(CONFIG_IPV6)
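The per-CPU writers above pair with readers such as ipgre_get_stats64(); the syncp seqcount is what lets a 32-bit reader fetch a consistent u64 snapshot. The usual aggregation loop looks roughly like this (a sketch, not the exact body of ipgre_get_stats64()):

static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		/* retry until the writer was not mid-update */
		do {
			start = u64_stats_fetch_begin_bh(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_packets = tstats->tx_packets;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_packets += tx_packets;
		tot->tx_bytes += tx_bytes;
	}
	return tot;
}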
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
#define VTI_XMIT(stats1, stats2) do { \
int err; \
int pkt_len = skb->len; \
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
const struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
if ((iph->ttl = tiph->ttl) == 0)
iph->ttl = old_iph->ttl;
- nf_reset(skb);
- tstats = this_cpu_ptr(dev->tstats);
- __IPTUNNEL_XMIT(tstats, &dev->stats);
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
tx_error_icmp:
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct ip6_tnl __rcu **tnls[2];
};
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
-
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
struct pcpu_tstats sum = { 0 };
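ip6_tunnel previously carried its own unsigned long flavour of these counters with no syncp; after this change ip6_get_stats() sums the shared u64 per-CPU counters into the unsigned long fields of dev->stats. Roughly (a sketch, not the exact body; a strict reader would also wrap the reads in the u64_stats_fetch pattern shown earlier):

/* Sketch: fold per-CPU u64 counters into the legacy net_device_stats. */
static struct net_device_stats *my_ip6_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes   += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes   += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes   = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes   = sum.tx_bytes;
	return &dev->stats;
}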
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-/* often modified stats are per cpu, other are shared (netdev->stats) */
-struct pcpu_tstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
const struct iphdr *tiph = &tunnel->parms.iph;
const struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
if ((iph->ttl = tiph->ttl) == 0)
iph->ttl = iph6->hop_limit;
- nf_reset(skb);
- tstats = this_cpu_ptr(dev->tstats);
- __IPTUNNEL_XMIT(tstats, &dev->stats);
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
tx_error_icmp: