Merge tag 'v3.10.90' into update
net/core/dev.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
index 56383a3e5d715627010a3c13841d240390644527..dcf65d474a9677cf74201050f51ae68f3ea5e9f5 100644
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/static_key.h>
-
+#include <net/udp.h>
 #include "net-sysfs.h"
 
+#ifdef UDP_SKT_WIFI
+#include <linux/ftrace_event.h>
+#endif
+
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
 
@@ -927,7 +931,7 @@ bool dev_valid_name(const char *name)
                return false;
 
        while (*name) {
-               if (*name == '/' || isspace(*name))
+               if (*name == '/' || *name == ':' || isspace(*name))
                        return false;
                name++;
        }
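
The added '*name == ':'' test above also rejects colons in interface names; a colon is already used as a separator for interface alias labels (e.g. "eth0:1") and by code that parses device names, so allowing it in a base name would confuse those lookups. A user-space mirror of the character rule, for illustration only (the length and "."/".." checks of the full function are omitted):

    #include <ctype.h>
    #include <stdbool.h>

    /* Illustration: same character rejects as dev_valid_name(). */
    static bool ifname_chars_valid(const char *name)
    {
            if (*name == '\0')
                    return false;
            while (*name) {
                    if (*name == '/' || *name == ':' ||
                        isspace((unsigned char)*name))
                            return false;
                    name++;
            }
            return true;
    }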
@@ -2678,6 +2682,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
        spin_lock(root_lock);
        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+               printk(KERN_WARNING "[mtk_net]__dev_xmit_skb drop skb_len = %u\n", skb->len);
                kfree_skb(skb);
                rc = NET_XMIT_DROP;
        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
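
Note that the warning above fires once for every packet dropped on a deactivated qdisc. If that ever becomes noisy, a ratelimited form of the same message could be used; the sketch below is only an alternative phrasing, not part of this patch:

    /* Sketch: identical warning, bounded by the kernel's printk rate limit. */
    if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
            printk_ratelimited(KERN_WARNING
                               "[mtk_net]__dev_xmit_skb drop skb_len = %u\n",
                               skb->len);
            kfree_skb(skb);
            rc = NET_XMIT_DROP;
    }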
@@ -2789,6 +2794,22 @@ int dev_queue_xmit(struct sk_buff *skb)
 
        skb_reset_mac_header(skb);
 
+#ifdef UDP_SKT_WIFI
+       /* MET profiling: tag UDP packets from the monitored source port
+        * with the sequence id in the second 16-bit word of the payload. */
+       if (unlikely((sysctl_met_is_enable == 1) && (sysctl_udp_met_port > 0) &&
+                    (ip_hdr(skb)->protocol == IPPROTO_UDP) && skb->sk)) {
+               if (sysctl_udp_met_port == ntohs(inet_sk(skb->sk)->inet_sport)) {
+                       struct udphdr *udp_iphdr = udp_hdr(skb);
+                       if (udp_iphdr && ntohs(udp_iphdr->len) >= 12) {
+                               __u16 *seq_id = (__u16 *)((char *)udp_iphdr + 10);
+                               udp_event_trace_printk("F|%d|%s|%d\n",
+                                                      current->pid, *seq_id);
+                       }
+               }
+       }
+#endif
+
        /* Disable soft irqs for various locks below. Also
         * stops preemption for RCU.
         */
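
The UDP_SKT_WIFI block added above reads the trace sequence id from byte offset 10 of the UDP header: the header itself is 8 bytes, so that is the second 16-bit word of the payload, and the ntohs(len) >= 12 check guarantees those two bytes are present. A hypothetical helper showing the same arithmetic (the value is read exactly as the patch reads it, with no byte swap):

    #include <linux/udp.h>

    /* Hypothetical helper, illustration only: (char *)uh + 10 is payload
     * offset 2, valid whenever ntohs(uh->len) >= 12 (8-byte header plus
     * at least 4 payload bytes).
     */
    static __u16 met_payload_seq_id(const struct udphdr *uh)
    {
            const char *payload = (const char *)uh + sizeof(struct udphdr);

            return *(const __u16 *)(payload + 2);   /* == (char *)uh + 10 */
    }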
@@ -3443,8 +3464,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
        pt_prev = NULL;
 
-       rcu_read_lock();
-
 another_round:
        skb->skb_iif = skb->dev->ifindex;
 
@@ -3454,7 +3473,7 @@ another_round:
            skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
                skb = vlan_untag(skb);
                if (unlikely(!skb))
-                       goto unlock;
+                       goto out;
        }
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3479,7 +3498,7 @@ skip_taps:
 #ifdef CONFIG_NET_CLS_ACT
        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
-               goto unlock;
+               goto out;
 ncls:
 #endif
 
@@ -3494,7 +3513,7 @@ ncls:
                if (vlan_do_receive(&skb))
                        goto another_round;
                else if (unlikely(!skb))
-                       goto unlock;
+                       goto out;
        }
 
        rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3506,7 +3525,7 @@ ncls:
                switch (rx_handler(&skb)) {
                case RX_HANDLER_CONSUMED:
                        ret = NET_RX_SUCCESS;
-                       goto unlock;
+                       goto out;
                case RX_HANDLER_ANOTHER:
                        goto another_round;
                case RX_HANDLER_EXACT:
@@ -3558,8 +3577,6 @@ drop:
                ret = NET_RX_DROP;
        }
 
-unlock:
-       rcu_read_unlock();
 out:
        return ret;
 }
@@ -3606,29 +3623,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+       int ret;
+
        net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
+       rcu_read_lock();
+
 #ifdef CONFIG_RPS
        if (static_key_false(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
-               int cpu, ret;
-
-               rcu_read_lock();
-
-               cpu = get_rps_cpu(skb->dev, skb, &rflow);
+               int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
                        return ret;
                }
-               rcu_read_unlock();
        }
 #endif
-       return __netif_receive_skb(skb);
+       ret = __netif_receive_skb(skb);
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -4038,8 +4056,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
                unsigned int qlen;
 
                while ((skb = __skb_dequeue(&sd->process_queue))) {
+                       rcu_read_lock();
                        local_irq_enable();
                        __netif_receive_skb(skb);
+                       rcu_read_unlock();
                        local_irq_disable();
                        input_queue_head_incr(sd);
                        if (++work >= quota) {
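
The hunks above move RCU protection out of __netif_receive_skb_core(): the core function no longer takes rcu_read_lock() itself, so netif_receive_skb() and process_backlog() each wrap the call in their own read-side critical section. A minimal sketch of the resulting caller-side pattern (the function name is illustrative):

    /* Sketch: every path into __netif_receive_skb() now follows this shape;
     * the read lock covers the protocol and rx_handler lookups done inside.
     */
    static int deliver_one_skb(struct sk_buff *skb)
    {
            int ret;

            rcu_read_lock();
            ret = __netif_receive_skb(skb);
            rcu_read_unlock();

            return ret;
    }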
@@ -5827,6 +5847,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
 /**
  *     unregister_netdevice_many - unregister many devices
  *     @head: list of devices
+ *
+ *  Note: As most callers use a stack-allocated list_head,
+ *  we force a list_del() to make sure the stack won't be corrupted later.
  */
 void unregister_netdevice_many(struct list_head *head)
 {
@@ -5836,6 +5859,7 @@ void unregister_netdevice_many(struct list_head *head)
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
+               list_del(head);
        }
 }
 EXPORT_SYMBOL(unregister_netdevice_many);
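
With list_del(head) now done inside unregister_netdevice_many(), a caller that builds the list on its own stack no longer has to detach it before returning, which is also why the explicit list_del(&dev_kill_list) is dropped from default_device_exit_batch() in the last hunk below. An illustrative caller, not taken from this diff (real callers hold RTNL):

    static void kill_two_devices(struct net_device *a, struct net_device *b)
    {
            LIST_HEAD(kill_list);           /* lives on this stack frame */

            unregister_netdevice_queue(a, &kill_list);
            unregister_netdevice_queue(b, &kill_list);
            unregister_netdevice_many(&kill_list);
            /* The callee emptied kill_list, so nothing queued on it can
             * reference this stack frame once we return.
             */
    }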
@@ -6011,10 +6035,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
-       /* Append NAPI poll list from offline CPU. */
-       if (!list_empty(&oldsd->poll_list)) {
-               list_splice_init(&oldsd->poll_list, &sd->poll_list);
-               raise_softirq_irqoff(NET_RX_SOFTIRQ);
+       /* Append NAPI poll list from offline CPU, with one exception:
+        * process_backlog() must be called by the CPU owning the per-cpu
+        * backlog; its process_queue and input_pkt_queue are handled later.
+        */
+       while (!list_empty(&oldsd->poll_list)) {
+               struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+                                                           struct napi_struct,
+                                                           poll_list);
+
+               list_del_init(&napi->poll_list);
+               if (napi->poll == process_backlog)
+                       napi->state = 0;
+               else
+                       ____napi_schedule(sd, napi);
        }
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
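
The loop above migrates NAPI instances one by one so the offline CPU's backlog NAPI is never rescheduled on the surviving CPU; the dead CPU's queued packets are instead re-injected with netif_rx() in the next hunk. One reason, sketched loosely from the 3.10 backlog handler (locking and accounting trimmed): the backlog poll pops its per-CPU queues with the lockless __skb_dequeue() under local_irq_disable() only, which is safe solely on the CPU that owns that softnet_data.

    /* Simplified sketch, not the real function: shows the ownership
     * assumption that makes rescheduling the backlog NAPI on another
     * CPU's behalf unsafe.
     */
    static int backlog_poll_sketch(struct napi_struct *napi, int quota)
    {
            struct softnet_data *sd = container_of(napi, struct softnet_data,
                                                   backlog);
            struct sk_buff *skb;
            int work = 0;

            local_irq_disable();
            while (work < quota && (skb = __skb_dequeue(&sd->process_queue))) {
                    rcu_read_lock();
                    local_irq_enable();
                    __netif_receive_skb(skb);
                    rcu_read_unlock();
                    local_irq_disable();
                    work++;
            }
            local_irq_enable();
            return work;
    }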
@@ -6025,7 +6059,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }
-       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }
@@ -6252,7 +6286,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
                }
        }
        unregister_netdevice_many(&dev_kill_list);
-       list_del(&dev_kill_list);
        rtnl_unlock();
 }