Merge tag 'v3.10.58' into update
author  Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:31:25 +0000 (22:31 +0100)
committer  Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:31:25 +0000 (22:31 +0100)
This is the 3.10.58 stable release

Makefile
drivers/usb/core/hub.c
include/net/inet_connection_sock.h
include/net/sock.h
include/net/tcp.h
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/tcp_ipv6.c
net/packet/af_packet.c

diff --combined Makefile
index 2e86bf6a11780d73d119be62beeef7df9deeec9b,c27454b8ca3e7fee837660e4bdf48190a6e6730b..abb79ff7acba5888d3af1dbf0764c5f61c41a737
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 57
+ SUBLEVEL = 58
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -373,9 -373,7 +373,9 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -fno-strict-aliasing -fno-common \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
 -                 -fno-delete-null-pointer-checks
 +                 -fno-delete-null-pointer-checks \
 +                 -w
 +
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
  KBUILD_AFLAGS   := -D__ASSEMBLY__
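
Note on the local change above: adding -w to KBUILD_CFLAGS tells GCC to inhibit all warning messages, so the diagnostics normally produced by -Wall/-Wundef in these common flags are silenced for the whole tree. A minimal illustration (plain C, not from this tree; the function name is made up):

/* With the flags above, the -Wall unused-variable diagnostic for 'tmp'
 * is not printed, because -w inhibits all warnings. */
static int example_unused_warning(void)
{
        int tmp;        /* -Wall would warn about this; -w silences it */

        return 0;
}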
diff --combined drivers/usb/core/hub.c
index abdbaa581a11fa0467f4f5f5d313830eb9db0a87,c9f56ffdba9a435c2e0f775567644cb08879d504..5155d19f8b41b8fae637308d0f0ee2e1c502bf0f
  
  #include <asm/uaccess.h>
  #include <asm/byteorder.h>
 -
  #include "hub.h"
  
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +int is_musbfsh_rh(struct usb_device *udev);
 +void set_icusb_sts_disconnect_done(void);
 +#endif
 +
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +#include <linux/proc_fs.h>
 +#include <linux/uaccess.h>
 +static struct usb_device *g_dsda_dev = NULL;
 +
 +#ifdef        CONFIG_PM_RUNTIME
 +struct usb_hub *usb11_hub = NULL;
 +int is_musbfsh_rh(struct usb_device *udev);
 +
 +struct usb_device *get_usb11_child_udev(void)
 +{
 +      if(usb11_hub){
 +              MYDBG("\n");
 +              return usb11_hub->ports[0]->child;
 +      }else{
 +              MYDBG("\n");
 +              return NULL;
 +      }
 +}
 +#endif
 +
 +void dump_data(char *buf, int len)
 +{
 +      int i;
 +      for(i =0 ; i< len ; i++)
 +      {
 +              MYDBG("data[%d]: %x\n", i, buf[i]);
 +      }
 +}
 +
 +void test_dsda_device_ep0(void)
 +{
 +
 +      int ret;
 +      char data_buf[256];
 +      ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
 +                      USB_REQ_GET_DESCRIPTOR,
 +                      USB_DIR_IN,
 +                      USB_DT_DEVICE << 8,
 +                      0,
 +                      data_buf,
 +                      64,
 +                      USB_CTRL_GET_TIMEOUT);
 +
 +
 +
 +      if (ret < 0) {
 +              MYDBG("test ep fail, ret : %d\n", ret);
 +      }
 +      else
 +      {
 +              MYDBG("test ep0 ok, ret : %d\n", ret);
 +              dump_data(data_buf, ret);
 +      }
 +
 +}
 +
 +void release_usb11_wakelock(void);
 +static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
 +{
 +      char cmd[64];
 +
 +      int ret = copy_from_user((char *) &cmd, user_buffer, count);
 +
 +      if(ret != 0)
 +      {
 +              return -EFAULT;
 +      }
 +
 +      /* apply action here */
 +      if(cmd[0] == '0')
 +      {
 +              MYDBG("");
 +              test_dsda_device_ep0();
 +      }
 +      if(cmd[0] == '1')
 +      {
 +              MYDBG("");
 +              release_usb11_wakelock();
 +      }
 +
 +      MYDBG("");
 +
 +      return count;
 +}
 +
 +struct file_operations dsda_tmp_proc_fops = {
 +      .write = dsda_tmp_proc_entry
 +};
 +
 +
 +void create_dsda_tmp_entry(void)
 +{
 +      struct proc_dir_entry *prEntry;
 +
 +      MYDBG("");
 +
 +      prEntry = proc_create("DSDA_TMP_ENTRY", 0660, 0, &dsda_tmp_proc_fops);
 +      if (prEntry)
 +      {
 +              MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
 +      }
 +      else
 +      {
 +              MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
 +      }
 +}
 +#endif
 +
  /* if we are in debug mode, always announce new devices */
  #ifdef DEBUG
  #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
  #define USB_VENDOR_GENESYS_LOGIC              0x05e3
  #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND      0x01
  
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +extern int usbif_u3h_send_event(char* event) ;
 +#include "otg_whitelist.h"
 +#endif
 +
 +
  static inline int hub_is_superspeed(struct usb_device *hdev)
  {
        return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
@@@ -233,13 -113,8 +233,13 @@@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rw
  #define HUB_DEBOUNCE_STEP       25
  #define HUB_DEBOUNCE_STABLE    100
  
 +static void hub_release(struct kref *kref);
  static int usb_reset_and_verify_device(struct usb_device *udev);
  
 +#define usb_sndaddr0pipe()    (PIPE_CONTROL << 30)
 +#define usb_rcvaddr0pipe()    ((PIPE_CONTROL << 30) | USB_DIR_IN)
 +
 +
  static inline char *portspeed(struct usb_hub *hub, int portstatus)
  {
        if (hub_is_superspeed(hub->hdev))
@@@ -531,7 -406,6 +531,7 @@@ int usb_clear_port_feature(struct usb_d
   */
  static int set_port_feature(struct usb_device *hdev, int port1, int feature)
  {
 +      MYDBG("");
        return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
                USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
                NULL, 0, 1000);
@@@ -979,7 -853,7 +979,7 @@@ static int hub_hub_status(struct usb_hu
                                "%s failed (err = %d)\n", __func__, ret);
        } else {
                *status = le16_to_cpu(hub->status->hub.wHubStatus);
 -              *change = le16_to_cpu(hub->status->hub.wHubChange); 
 +              *change = le16_to_cpu(hub->status->hub.wHubChange);
                ret = 0;
        }
        mutex_unlock(&hub->status_mutex);
@@@ -1146,21 -1020,10 +1146,21 @@@ static void hub_activate(struct usb_hu
        unsigned delay;
  
        /* Continue a partial initialization */
 -      if (type == HUB_INIT2)
 -              goto init2;
 -      if (type == HUB_INIT3)
 +      if (type == HUB_INIT2 || type == HUB_INIT3) {
 +              device_lock(hub->intfdev);
 +
 +              /* Was the hub disconnected while we were waiting? */
 +              if (hub->disconnected) {
 +                      device_unlock(hub->intfdev);
 +                      kref_put(&hub->kref, hub_release);
 +                      return;
 +              }
 +              if (type == HUB_INIT2)
 +                      goto init2;
 +
                goto init3;
 +      }
 +      kref_get(&hub->kref);
  
        /* The superspeed hub except for root hub has to use Hub Depth
         * value as an offset into the route string to locate the bits
                        PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
                        schedule_delayed_work(&hub->init_work,
                                        msecs_to_jiffies(delay));
 +                      device_unlock(hub->intfdev);
                        return;         /* Continues at init3: below */
                } else {
                        msleep(delay);
        /* Allow autosuspend if it was suppressed */
        if (type <= HUB_INIT3)
                usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
 +
 +      if (type == HUB_INIT2 || type == HUB_INIT3)
 +              device_unlock(hub->intfdev);
 +
 +      kref_put(&hub->kref, hub_release);
  }
  
  /* Implement the continuations for the delays above */
@@@ -1649,7 -1506,7 +1649,7 @@@ static int hub_configure(struct usb_hu
                        hub->mA_per_port = hdev->bus_mA;
                        hub->limited_power = 1;
                }
 -      } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
 +      } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { // bus powered
                int remaining = hdev->bus_mA -
                        hub->descriptor->bHubContrCurrent;
  
                        hub->descriptor->bHubContrCurrent);
                hub->limited_power = 1;
  
 -              if (remaining < hdev->maxchild * unit_load)
 +              if (remaining < hdev->maxchild * unit_load){
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +                      usbif_u3h_send_event("DEV_OVER_CURRENT");
 +#endif
                        dev_warn(hub_dev,
                                        "insufficient power available "
                                        "to use all downstream ports\n");
 +              }
                hub->mA_per_port = unit_load;   /* 7.2.1 */
  
        } else {        /* Self-powered external hub */
@@@ -1812,7 -1665,6 +1812,7 @@@ static int hub_probe(struct usb_interfa
        struct usb_device *hdev;
        struct usb_hub *hub;
  
 +
        desc = intf->cur_altsetting;
        hdev = interface_to_usbdev(intf);
  
        if (hdev->level == MAX_TOPO_LEVEL) {
                dev_err(&intf->dev,
                        "Unsupported bus topology: hub nested too deep\n");
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
 +#endif
                return -E2BIG;
        }
  
  #ifdef        CONFIG_USB_OTG_BLACKLIST_HUB
        if (hdev->parent) {
                dev_warn(&intf->dev, "ignoring external hub\n");
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("HUB_NOT_SUPPORTED");
 +#endif
                return -ENODEV;
        }
  #endif
@@@ -2105,8 -1951,10 +2105,10 @@@ void usb_set_device_state(struct usb_de
                                        || new_state == USB_STATE_SUSPENDED)
                                ;       /* No change to wakeup settings */
                        else if (new_state == USB_STATE_CONFIGURED)
-                               wakeup = udev->actconfig->desc.bmAttributes
-                                        & USB_CONFIG_ATT_WAKEUP;
+                               wakeup = (udev->quirks &
+                                       USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
+                                       udev->actconfig->desc.bmAttributes &
+                                       USB_CONFIG_ATT_WAKEUP;
                        else
                                wakeup = 0;
                }
@@@ -2224,18 -2072,6 +2226,18 @@@ void usb_disconnect(struct usb_device *
        struct usb_device       *udev = *pdev;
        struct usb_hub          *hub = usb_hub_to_struct_hub(udev);
        int                     i;
 +      struct timeval tv_begin, tv_end;
 +      struct timeval tv_before, tv_after;
 +      do_gettimeofday(&tv_begin);
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      int is_icusb_rh;
 +#endif
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      is_icusb_rh = is_musbfsh_rh(udev->parent);
 +#endif
 +
  
        /* mark the device as inactive, so any further urb submissions for
         * this device (and any of its children) will fail immediately.
         * so that the hardware is now fully quiesced.
         */
        dev_dbg (&udev->dev, "unregistering device\n");
 +
 +      do_gettimeofday(&tv_before);
        usb_disable_device(udev, 0);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
 +
        usb_hcd_synchronize_unlinks(udev);
  
        if (udev->parent) {
                        port_dev->did_runtime_put = false;
        }
  
 +      do_gettimeofday(&tv_before);
        usb_remove_ep_devs(&udev->ep0);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
 +
        usb_unlock_device(udev);
  
        /* Unregister the device.  The device driver is responsible
         * for de-configuring the device and invoking the remove-device
         * notifier chain (used by usbfs and possibly others).
         */
 +      do_gettimeofday(&tv_before);
        device_del(&udev->dev);
 +      do_gettimeofday(&tv_after);
 +      MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
  
        /* Free the device number and delete the parent's children[]
         * (or root_hub) pointer.
        hub_free_dev(udev);
  
        put_device(&udev->dev);
 +
 +#ifdef CONFIG_MTK_ICUSB_SUPPORT
 +      if (is_icusb_rh)
 +      {
 +              set_icusb_sts_disconnect_done();
 +              MYDBG("ICUSB Disconnect\n");
 +      }
 +#endif
 +      do_gettimeofday(&tv_end);
 +      MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
  }
  
  #ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@@ -2459,16 -2273,6 +2461,16 @@@ static int usb_enumerate_device(struct 
        udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
  
        err = usb_enumerate_device_otg(udev);
 +
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +      if (udev->parent){ // we don't have to check ourself (roothub)
 +              if (!is_targeted(udev)) {
 +                      usbif_u3h_send_event("DEV_NOT_SUPPORTED");
 +                      err = -ENOTSUPP;
 +              }
 +      }
 +#endif
 +
        if (err < 0)
                return err;
  
@@@ -2540,16 -2344,6 +2542,16 @@@ int usb_new_device(struct usb_device *u
                 * sysfs power/wakeup controls wakeup enabled/disabled
                 */
                device_init_wakeup(&udev->dev, 0);
 +              MYDBG("udev :%p\n", udev);
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +#ifdef        CONFIG_PM_RUNTIME
 +              if(is_musbfsh_rh(udev->parent)){
 +                      MYDBG("\n");
 +                      /*find out struct *usb_hub and hook it */
 +                      usb11_hub = usb_hub_to_struct_hub(udev->parent);
 +              }
 +#endif
 +#endif
        }
  
        /* Tell the runtime-PM framework the device is active */
@@@ -2760,9 -2554,7 +2762,9 @@@ static int hub_port_wait_reset(struct u
                msleep(delay);
  
                /* read and decode port status */
 +              MYDBG("");
                ret = hub_port_status(hub, port1, &portstatus, &portchange);
 +              MYDBG("");
                if (ret < 0)
                        return ret;
  
@@@ -2887,38 -2679,27 +2889,38 @@@ static int hub_port_reset(struct usb_hu
  
        /* Reset the port */
        for (i = 0; i < PORT_RESET_TRIES; i++) {
 +              MYDBG("");
                status = set_port_feature(hub->hdev, port1, (warm ?
                                        USB_PORT_FEAT_BH_PORT_RESET :
                                        USB_PORT_FEAT_RESET));
 +              MYDBG("");
                if (status == -ENODEV) {
 +                      MYDBG("");
                        ;       /* The hub is gone */
                } else if (status) {
 +                      MYDBG("");
                        dev_err(hub->intfdev,
                                        "cannot %sreset port %d (err = %d)\n",
                                        warm ? "warm " : "", port1, status);
                } else {
 +                      MYDBG("");
                        status = hub_port_wait_reset(hub, port1, udev, delay,
                                                                warm);
 -                      if (status && status != -ENOTCONN && status != -ENODEV)
 +                      if (status && status != -ENOTCONN)
 +                      {
 +                              MYDBG("");
                                dev_dbg(hub->intfdev,
                                                "port_wait_reset: err = %d\n",
                                                status);
 +                      }
                }
  
 +              MYDBG("");
                /* Check for disconnect or reset */
                if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
 +                      MYDBG("");
                        hub_port_finish_reset(hub, port1, udev, &status);
 +                      MYDBG("");
  
                        if (!hub_is_superspeed(hub->hdev))
                                goto done;
                                warm = true;
                        }
                }
 +              MYDBG("");
  
                dev_dbg (hub->intfdev,
                        "port %d not enabled, trying %sreset again...\n",
                        port1, warm ? "warm " : "");
                delay = HUB_LONG_RESET_TIME;
        }
 +      MYDBG("");
 +
  
        dev_err (hub->intfdev,
                "Cannot enable port %i.  Maybe the USB cable is bad?\n",
  
  done:
        if (!hub_is_superspeed(hub->hdev))
 +      {
 +              MYDBG("");
                up_read(&ehci_cf_port_reset_rwsem);
 +      }
 +
 +      MYDBG("");
  
        return status;
  }
@@@ -3209,10 -2982,7 +3211,10 @@@ int usb_port_suspend(struct usb_device 
                                        status);
                        /* bail if autosuspend is requested */
                        if (PMSG_IS_AUTO(msg))
 +                      {
 +                              MYDBG("");
                                goto err_wakeup;
 +                      }
                }
        }
  
        if (usb_disable_ltm(udev)) {
                dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
                status = -ENOMEM;
 +              MYDBG("");
                if (PMSG_IS_AUTO(msg))
                        goto err_ltm;
        }
        if (usb_unlocked_disable_lpm(udev)) {
                dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
                status = -ENOMEM;
 +              MYDBG("");
                if (PMSG_IS_AUTO(msg))
                        goto err_lpm3;
        }
  
        /* see 7.1.7.6 */
        if (hub_is_superspeed(hub->hdev))
 +      {
 +              MYDBG("");
                status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
  
 +#if 0 /* behavior for kernel 3.10 */
        /*
         * For system suspend, we do not need to enable the suspend feature
         * on individual USB-2 ports.  The devices will automatically go
         * Therefore we will turn on the suspend feature if udev or any of its
         * descendants is enabled for remote wakeup.
         */
 -      else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
 +      } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
 +              MYDBG("");
                status = set_port_feature(hub->hdev, port1,
                                USB_PORT_FEAT_SUSPEND);
 -      else {
 +      else {
                really_suspend = false;
                status = 0;
        }
 +#else /*roll back behavior to kernel 3.4 */
 +      }else{
 +              MYDBG("");
 +              status = set_port_feature(hub->hdev, port1,
 +                              USB_PORT_FEAT_SUSPEND);
 +      }
 +#endif
 +
        if (status) {
                dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
                                port1, status);
 +              MYDBG("");
  
                /* Try to enable USB3 LPM and LTM again */
                usb_unlocked_enable_lpm(udev);
@@@ -3368,9 -3123,7 +3370,9 @@@ static int finish_port_resume(struct us
         */
        if (status == 0) {
                devstatus = 0;
 +              MYDBG("\n");
                status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
 +              MYDBG("%d\n", status);
                if (status >= 0)
                        status = (status > 0 ? 0 : -ENODEV);
  
@@@ -4161,7 -3914,7 +4163,7 @@@ EXPORT_SYMBOL_GPL(usb_enable_ltm)
   * Between connect detection and reset signaling there must be a delay
   * of 100ms at least for debounce and power-settling.  The corresponding
   * timer shall restart whenever the downstream port detects a disconnect.
 - * 
 + *
   * Apparently there are some bluetooth and irda-dongles and a number of
   * low-speed devices for which this debounce period may last over a second.
   * Not covered by the spec - but easy to deal with.
@@@ -4222,6 -3975,8 +4224,6 @@@ void usb_ep0_reinit(struct usb_device *
  }
  EXPORT_SYMBOL_GPL(usb_ep0_reinit);
  
 -#define usb_sndaddr0pipe()    (PIPE_CONTROL << 30)
 -#define usb_rcvaddr0pipe()    ((PIPE_CONTROL << 30) | USB_DIR_IN)
  
  static int hub_set_address(struct usb_device *udev, int devnum)
  {
@@@ -4276,7 -4031,6 +4278,7 @@@ hub_port_init (struct usb_hub *hub, str
        const char              *speed;
        int                     devnum = udev->devnum;
  
 +      dump_stack();
        /* root hub ports have a slightly longer reset period
         * (from USB 2.0 spec, section 7.1.7.5)
         */
  
        /* Reset the device; full speed may morph to high speed */
        /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
 +      MYDBG("");
        retval = hub_port_reset(hub, port1, udev, delay, false);
 +      MYDBG("");
        if (retval < 0)         /* error or disconnect */
                goto fail;
        /* success, speed is known */
        default:
                goto fail;
        }
 +      MYDBG("");
  
        if (udev->speed == USB_SPEED_WIRELESS)
                speed = "variable speed Wireless";
                udev->tt = &hub->tt;
                udev->ttport = port1;
        }
 - 
 +
        /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
         * Because device hardware and firmware is sometimes buggy in
         * this area, and this is how Linux has done it for ages.
         * value.
         */
        for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
 +              MYDBG("");
                if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
                        struct usb_device_descriptor *buf;
                        int r = 0;
                                }
                                if (r == 0)
                                        break;
 +
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +                              if (buf->bMaxPacketSize0 == 0) {
 +                                      usbif_u3h_send_event("DEV_CONN_TMOUT");
 +                              }
 +#endif
 +
                        }
                        udev->descriptor.bMaxPacketSize0 =
                                        buf->bMaxPacketSize0;
                udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
                usb_ep0_reinit(udev);
        }
 -  
 +
        retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
        if (retval < (signed)sizeof(udev->descriptor)) {
                if (retval != -ENODEV)
@@@ -4631,9 -4374,6 +4633,9 @@@ hub_power_remaining (struct usb_hub *hu
                remaining -= delta;
        }
        if (remaining < 0) {
 +#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
 +              usbif_u3h_send_event("DEV_OVER_CURRENT");
 +#endif
                dev_warn(hub->intfdev, "%dmA over power budget!\n",
                        - remaining);
                remaining = 0;
@@@ -4661,7 -4401,6 +4663,7 @@@ static void hub_port_connect_change(str
        int status, i;
        unsigned unit_load;
  
 +      MYDBG("");
        dev_dbg (hub_dev,
                "port %d, status %04x, change %04x, %s\n",
                port1, portstatus, portchange, portspeed(hub, portstatus));
                }
  
                /* reset (non-USB 3.0 devices) and get descriptor */
 +              MYDBG("");
                status = hub_port_init(hub, udev, port1, i);
                if (status < 0)
 +              {
 +                      MYDBG("");
                        goto loop;
 +              }
 +              MYDBG("");
  
                usb_detect_quirks(udev);
                if (udev->quirks & USB_QUIRK_DELAY_INIT)
                                goto loop_disable;
                        }
                }
 - 
 +
                /* check for devices running slower than they could */
                if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
                                && udev->speed == USB_SPEED_FULL
                                hub->ports[port1 - 1]->child = NULL;
                                spin_unlock_irq(&device_state_lock);
                        }
 +#ifdef CONFIG_MTK_DT_USB_SUPPORT
 +                      g_dsda_dev = udev;
 +                      MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
 +#endif
                }
  
                if (status)
@@@ -4894,7 -4624,7 +4896,7 @@@ loop
                        dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
                                        port1);
        }
 - 
 +
  done:
        hub_port_disable(hub, port1, 1);
        if (hcd->driver->relinquish_port && !hub->hdev->parent)
@@@ -5017,7 -4747,6 +5019,7 @@@ static void hub_events(void
                        dev_dbg (hub_dev, "resetting for error %d\n",
                                hub->error);
  
 +                      MYDBG("");
                        ret = usb_reset_device(hdev);
                        if (ret) {
                                dev_dbg (hub_dev,
                                 * EM interference sometimes causes badly
                                 * shielded USB devices to be shutdown by
                                 * the hub, this hack enables them again.
 -                               * Works at least with mouse driver. 
 +                               * Works at least with mouse driver.
                                 */
                                if (!(portstatus & USB_PORT_STAT_ENABLE)
                                    && !connect_change
@@@ -5249,9 -4978,6 +5251,9 @@@ static struct usb_driver hub_driver = 
        .supports_autosuspend = 1,
  };
  
 +#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head* list);
 +#endif
  int usb_hub_init(void)
  {
        if (usb_register(&hub_driver) < 0) {
                return -1;
        }
  
 +#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
 +      mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
 +#endif
 +
        khubd_task = kthread_run(hub_thread, NULL, "khubd");
        if (!IS_ERR(khubd_task))
                return 0;
@@@ -5402,7 -5124,6 +5404,7 @@@ static int usb_reset_and_verify_device(
        int                             i, ret = 0;
        int                             port1 = udev->portnum;
  
 +      MYDBG("");
        if (udev->state == USB_STATE_NOTATTACHED ||
                        udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
  
        if (ret < 0)
                goto re_enumerate;
 - 
 +
        /* Device might have changed firmware (DFU or similar) */
        if (descriptors_changed(udev, &descriptor)) {
                dev_info(&udev->dev, "device firmware changed\n");
@@@ -5524,7 -5245,7 +5526,7 @@@ done
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
        return 0;
 - 
 +
  re_enumerate:
        /* LPM state doesn't matter when we're about to destroy the device. */
        hub_port_logical_disconnect(parent_hub, port1);
@@@ -5558,7 -5279,6 +5560,7 @@@ int usb_reset_device(struct usb_device 
        unsigned int noio_flag;
        struct usb_host_config *config = udev->actconfig;
  
 +      MYDBG("");
        if (udev->state == USB_STATE_NOTATTACHED ||
                        udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
index b4322314606f9eecc57065cd77d7877137bc6f49,0a8f6f961baa749b4b4aaed155ebe999d4c59a0d..2da0c72f48e37304e1dbf9178c36bd722f00c073
@@@ -62,6 -62,7 +62,7 @@@ struct inet_connection_sock_af_ops 
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
        int         (*bind_conflict)(const struct sock *sk,
                                     const struct inet_bind_bucket *tb, bool relax);
+       void        (*mtu_reduced)(struct sock *sk);
  };
  
  /** inet_connection_sock - INET connection oriented sock
@@@ -93,7 -94,6 +94,7 @@@ struct inet_connection_sock 
        struct timer_list         icsk_retransmit_timer;
        struct timer_list         icsk_delack_timer;
        __u32                     icsk_rto;
 +      __u32                     icsk_MMSRB;
        __u32                     icsk_pmtu_cookie;
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
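
The new mtu_reduced member turns the PMTU-shrink handler into a per-address-family callback rather than a per-protocol one (its counterpart is removed from struct proto in include/net/sock.h below). A minimal sketch of the dispatch once the ops tables are filled in, mirroring the tcp_release_cb() change in net/ipv4/tcp_output.c further down (the NULL check is an extra precaution, not part of the patch):

#include <net/sock.h>
#include <net/inet_connection_sock.h>

/* sketch only: reach the deferred MTU-reduction handler through icsk_af_ops,
 * which points at ipv4_specific or ipv6_specific for TCP sockets */
static void dispatch_mtu_reduced(struct sock *sk)
{
        const struct inet_connection_sock_af_ops *ops = inet_csk(sk)->icsk_af_ops;

        if (ops->mtu_reduced)           /* tcp_v4_mtu_reduced or tcp_v6_mtu_reduced */
                ops->mtu_reduced(sk);
}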
diff --combined include/net/sock.h
index 66bef38c5180aeba0fafae541d7f261bba5dffc6,c0aad07160ef3e3b03be628634ca6ab70aec339c..02199accb6be2b9ddeffdef24dc001c686b2e9d5
@@@ -780,12 -780,8 +780,12 @@@ static inline __must_check int sk_add_b
                                              unsigned int limit)
  {
        if (sk_rcvqueues_full(sk, skb, limit))
 +      {
 +              #ifdef CONFIG_MTK_NET_LOGGING 
 +              printk(KERN_ERR "[mtk_net][sock]sk_add_backlog->sk_rcvqueues_full sk->sk_rcvbuf:%d,sk->sk_sndbuf:%d ",sk->sk_rcvbuf,sk->sk_sndbuf);
 +              #endif          
                return -ENOBUFS;
 -
 +      }
        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
@@@ -936,7 -932,6 +936,6 @@@ struct proto 
                                                struct sk_buff *skb);
  
        void            (*release_cb)(struct sock *sk);
-       void            (*mtu_reduced)(struct sock *sk);
  
        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
diff --combined include/net/tcp.h
index 18af925cef1ff8da3a5253cb9e22f8c6095a7d45,29a1a63cd3033d6967b0b06a003cf26256576156..85a69deaf091b2fa1c843588ec7554492d31bb36
@@@ -61,8 -61,8 +61,8 @@@ extern void tcp_time_wait(struct sock *
   */
  #define MAX_TCP_WINDOW                32767U
  
 -/* Offer an initial receive window of 10 mss. */
 -#define TCP_DEFAULT_INIT_RCVWND       10
 +/* Offer an initial receive window of 20 mss. */
 +#define TCP_DEFAULT_INIT_RCVWND       20
  
  /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  #define TCP_MIN_MSS           88U
                                 * to ~3sec-8min depending on RTO.
                                 */
  
 -#define TCP_RETR2     15      /*
 +#define TCP_RETR2     10      /*
                                 * This should take at least
                                 * 90 minutes to time out.
                                 * RFC1122 says that the limit is 100 sec.
                                 * 15 is ~13-30min depending on RTO.
                                 */
  
 -#define TCP_SYN_RETRIES        6      /* This is how many retries are done
 +#define TCP_SYN_RETRIES        9      /* This is how many retries are done
                                 * when active opening a connection.
                                 * RFC1122 says the minimum retry MUST
                                 * be at least 180secs.  Nevertheless
  #define TCP_DELACK_MIN        4U
  #define TCP_ATO_MIN   4U
  #endif
 -#define TCP_RTO_MAX   ((unsigned)(120*HZ))
 +#define TCP_RTO_MAX   ((unsigned)(60*HZ))
  #define TCP_RTO_MIN   ((unsigned)(HZ/5))
  #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))   /* RFC6298 2.1 initial RTO value        */
  #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))       /* RFC 1122 initial RTO value, now
@@@ -287,11 -287,8 +287,11 @@@ extern int sysctl_tcp_thin_dupack
  extern int sysctl_tcp_early_retrans;
  extern int sysctl_tcp_limit_output_bytes;
  extern int sysctl_tcp_challenge_ack_limit;
 +extern int sysctl_tcp_default_init_rwnd;
  extern int sysctl_tcp_min_tso_segs;
 -
 +extern int sysctl_tcp_default_init_rwnd;
 +extern int sysctl_tcp_rto_min;
 +extern int sysctl_tcp_rto_max;
  extern atomic_long_t tcp_memory_allocated;
  extern struct percpu_counter tcp_sockets_allocated;
  extern int tcp_memory_pressure;
@@@ -463,6 -460,7 +463,7 @@@ extern const u8 *tcp_parse_md5sig_optio
   */
  
  extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+ void tcp_v4_mtu_reduced(struct sock *sk);
  extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
  extern struct sock * tcp_create_openreq_child(struct sock *sk,
                                              struct request_sock *req,
@@@ -603,8 -601,8 +604,8 @@@ extern void tcp_init_buffer_space(struc
  
  static inline void tcp_bound_rto(const struct sock *sk)
  {
 -      if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 -              inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 +      if (inet_csk(sk)->icsk_rto > sysctl_tcp_rto_max)
 +              inet_csk(sk)->icsk_rto = sysctl_tcp_rto_max;
  }
  
  static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
@@@ -641,7 -639,7 +642,7 @@@ static inline void tcp_fast_path_check(
  static inline u32 tcp_rto_min(struct sock *sk)
  {
        const struct dst_entry *dst = __sk_dst_get(sk);
 -      u32 rto_min = TCP_RTO_MIN;
 +      u32 rto_min = sysctl_tcp_rto_min;
  
        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@@ -981,7 -979,7 +982,7 @@@ static inline void tcp_check_probe_time
  
        if (!tp->packets_out && !icsk->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        icsk->icsk_rto, TCP_RTO_MAX);
 +                                        icsk->icsk_rto, sysctl_tcp_rto_max);
  }
  
  static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@@ -1188,8 -1186,8 +1189,8 @@@ static inline void tcp_mib_init(struct 
  {
        /* See RFC 2012 */
        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
 -      TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
 -      TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
 +      TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, sysctl_tcp_rto_min*1000/HZ);
 +      TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, sysctl_tcp_rto_max*1000/HZ);
        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
  }
  
@@@ -1394,8 -1392,6 +1395,8 @@@ static inline void tcp_check_send_head(
  {
        if (sk->sk_send_head == skb_unlinked)
                sk->sk_send_head = NULL;
 +      if (tcp_sk(sk)->highest_sack == skb_unlinked)
 +              tcp_sk(sk)->highest_sack = NULL;
  }
  
  static inline void tcp_init_send_head(struct sock *sk)
@@@ -1539,15 -1535,6 +1540,15 @@@ struct tcp_iter_state 
        loff_t                  last_pos;
  };
  
 +/* MTK_NET_CHANGES */
 +/*
 + * reset tcp connection by uid
 + */
 +struct uid_err {
 +      int appuid;
 +      int errNum;
 +};
 +
  extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
  extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
  
@@@ -1566,11 -1553,6 +1567,11 @@@ extern struct sk_buff **tcp4_gro_receiv
  extern int tcp_gro_complete(struct sk_buff *skb);
  extern int tcp4_gro_complete(struct sk_buff *skb);
  
 +extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
 +/* MTK_NET_CHANGES */
 +extern void tcp_v4_reset_connections_by_uid(struct uid_err uid_e);
 +extern void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e);
 +
  #ifdef CONFIG_PROC_FS
  extern int tcp4_proc_init(void);
  extern void tcp4_proc_exit(void);
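
A rough sense of the timer changes above (TCP_RTO_MAX halved from 120 s to 60 s, TCP_RETR2 dropped from 15 to 10, TCP_SYN_RETRIES raised to 9, and the hard constants replaced by sysctl_tcp_rto_min/sysctl_tcp_rto_max at the call sites): with exponential backoff capped at the maximum RTO, the total time spent retransmitting shrinks considerably. Illustrative arithmetic only, assuming a 200 ms starting RTO (the TCP_RTO_MIN floor); real sockets start from the measured RTO and the kernel's give-up logic is more involved:

/* illustrative only: add up retransmission intervals when the backoff
 * doubles from an assumed 200 ms initial RTO and is capped at rto_max */
static unsigned int approx_retrans_ms(unsigned int rto_ms,
                                      unsigned int rto_max_ms, int retries)
{
        unsigned int total = 0;

        while (retries-- > 0) {
                total += rto_ms;
                rto_ms = (rto_ms * 2 > rto_max_ms) ? rto_max_ms : rto_ms * 2;
        }
        /* ~162 s for (200, 60000, 10) versus ~805 s for (200, 120000, 15) */
        return total;
}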
diff --combined net/ipv4/tcp_ipv4.c
index fb244f4a38b523b4939d1a3cd4e69ae59cc69cbb,e025c1c788a1f2db47b3ed2e36241567dabdf875..35c01585cdeb2d0cb30986166b32168e079d7f5b
@@@ -233,7 -233,7 +233,7 @@@ int tcp_v4_connect(struct sock *sk, str
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
 -
 +        printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
@@@ -268,7 -268,7 +268,7 @@@ EXPORT_SYMBOL(tcp_v4_connect)
   * It can be called through tcp_release_cb() if socket was owned by user
   * at the time tcp_v4_err() was called to handle ICMP message.
   */
- static void tcp_v4_mtu_reduced(struct sock *sk)
+ void tcp_v4_mtu_reduced(struct sock *sk)
  {
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
  }
+ EXPORT_SYMBOL(tcp_v4_mtu_reduced);
  
  static void do_redirect(struct sk_buff *skb, struct sock *sk)
  {
@@@ -445,7 -446,7 +446,7 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                                remaining, TCP_RTO_MAX);
 +                                                remaining, sysctl_tcp_rto_max);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
@@@ -1423,7 -1424,7 +1424,7 @@@ static int tcp_v4_conn_req_fastopen(str
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 -          TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 +          TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
  
        /* Add the child socket directly into the accept queue */
        inet_csk_reqsk_queue_add(sk, req, child);
@@@ -1527,7 -1528,6 +1528,7 @@@ int tcp_v4_conn_request(struct sock *sk
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 +      ireq->ir_mark = inet_request_mark(sk, skb);
  
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@@ -1949,7 -1949,7 +1950,7 @@@ bool tcp_prequeue(struct sock *sk, stru
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
        return true;
  }
@@@ -2143,6 -2143,7 +2144,7 @@@ const struct inet_connection_sock_af_op
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
  #endif
+       .mtu_reduced       = tcp_v4_mtu_reduced,
  };
  EXPORT_SYMBOL(ipv4_specific);
  
@@@ -2162,7 -2163,6 +2164,7 @@@ static int tcp_v4_init_sock(struct soc
        struct inet_connection_sock *icsk = inet_csk(sk);
  
        tcp_init_sock(sk);
 +        icsk->icsk_MMSRB = 0;
  
        icsk->icsk_af_ops = &ipv4_specific;
  
@@@ -2218,115 -2218,6 +2220,115 @@@ void tcp_v4_destroy_sock(struct sock *s
  }
  EXPORT_SYMBOL(tcp_v4_destroy_sock);
  
 +void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
 +{
 +    unsigned int bucket;
 +    uid_t skuid = (uid_t)(uid_e.appuid);
 +      struct inet_connection_sock *icsk = NULL;//inet_csk(sk);
 +
 +
 +    for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +        struct hlist_nulls_node *node;
 +        struct sock *sk;
 +        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +    
 +        spin_lock_bh(lock);
 +        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +    
 +            if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                continue;
 +            if (sock_flag(sk, SOCK_DEAD))
 +                continue;
 +    
 +            if(sk->sk_socket){
 +                if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                    continue;
 +                else
 +                    printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
 +                        SOCK_INODE(sk->sk_socket)->i_uid);
 +            } else{
 +                continue;
 +          }
 +
 +                sock_hold(sk);
 +                spin_unlock_bh(lock);
 +    
 +                local_bh_disable();
 +                bh_lock_sock(sk);
 +
 +                // update sk time out value
 +              icsk = inet_csk(sk);
 +              printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
 +                                      
 +              sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
 +              icsk->icsk_rto = sysctl_tcp_rto_min * 30;       
 +              icsk->icsk_MMSRB = 1;
 +                              
 +                bh_unlock_sock(sk);
 +                local_bh_enable();
 +              spin_lock_bh(lock);
 +                sock_put(sk);
 +
 +            }
 +            spin_unlock_bh(lock);
 +        }
 +
 +}
 +
 +
 +/*
 + * tcp_v4_nuke_addr_by_uid - destroy all sockets of spcial uid
 + */
 +void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
 +{
 +    unsigned int bucket;
 +    uid_t skuid = (uid_t)(uid_e.appuid);
 +
 +    for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +        struct hlist_nulls_node *node;
 +        struct sock *sk;
 +        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +    
 +restart:
 +        spin_lock_bh(lock);
 +        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +    
 +            if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                continue;
 +            if (sock_flag(sk, SOCK_DEAD))
 +                continue;
 +    
 +            if(sk->sk_socket){
 +                if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                    continue;
 +                else
 +                    printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
 +                        SOCK_INODE(sk->sk_socket)->i_uid);
 +            } else{
 +                continue;
 +          }
 +
 +                sock_hold(sk);
 +                spin_unlock_bh(lock);
 +    
 +                local_bh_disable();
 +                bh_lock_sock(sk);
 +                sk->sk_err = uid_e.errNum;
 +                printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
 +                sk->sk_error_report(sk);
 +    
 +                tcp_done(sk);
 +                bh_unlock_sock(sk);
 +                local_bh_enable();
 +                sock_put(sk);
 +
 +                goto restart;
 +            }
 +            spin_unlock_bh(lock);
 +        }
 +}
 +
 +
  #ifdef CONFIG_PROC_FS
  /* Proc filesystem TCP sock list dumping. */
  
@@@ -2978,7 -2869,6 +2980,6 @@@ struct proto tcp_prot = 
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v4_do_rcv,
        .release_cb             = tcp_release_cb,
-       .mtu_reduced            = tcp_v4_mtu_reduced,
        .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
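
The two MTK helpers added above walk the established-socket hash and act on every TCP socket owned by a given uid: tcp_v4_handle_retrans_time_by_uid() rearms the retransmit timer with an enlarged icsk_rto and sets icsk_MMSRB, while tcp_v4_reset_connections_by_uid() reports the supplied error through sk_error_report() and tears the connection down with tcp_done(). A hypothetical call-site sketch using the struct uid_err declared in include/net/tcp.h above (the uid value is made up):

#include <linux/errno.h>
#include <net/tcp.h>

/* hypothetical in-kernel caller: abort every TCP connection of one app uid */
static void kill_app_sockets_example(void)
{
        struct uid_err e = {
                .appuid = 10123,                /* example Android app uid (assumption) */
                .errNum = ECONNABORTED,         /* delivered to userspace via sk->sk_err */
        };

        tcp_v4_reset_connections_by_uid(e);
}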
diff --combined net/ipv4/tcp_output.c
index a79b1f43a3e8a181441b1c1e7714fea9ed37513f,11ef25c9cf43200548f5a75760adb2f509575124..eef147cff1265516bf16a6fea281e05a56098cf5
@@@ -231,13 -231,14 +231,13 @@@ void tcp_select_initial_window(int __sp
        }
  
        /* Set initial window to a value enough for senders starting with
 -       * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 +       * initial congestion window of sysctl_tcp_default_init_rwnd. Place
         * a limit on the initial window when mss is larger than 1460.
         */
        if (mss > (1 << *rcv_wscale)) {
 -              int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 +              int init_cwnd = sysctl_tcp_default_init_rwnd;
                if (mss > 1460)
 -                      init_cwnd =
 -                      max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 +                      init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
@@@ -774,7 -775,7 +774,7 @@@ void tcp_release_cb(struct sock *sk
                __sock_put(sk);
        }
        if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
-               sk->sk_prot->mtu_reduced(sk);
+               inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
                __sock_put(sk);
        }
  }
@@@ -1993,7 -1994,7 +1993,7 @@@ bool tcp_schedule_loss_probe(struct soc
        }
  
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
        return true;
  }
  
@@@ -2035,9 -2036,7 +2035,7 @@@ void tcp_send_loss_probe(struct sock *s
        if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
                goto rearm_timer;
  
-       /* Probe with zero data doesn't trigger fast recovery. */
-       if (skb->len > 0)
-               err = __tcp_retransmit_skb(sk, skb);
+       err = __tcp_retransmit_skb(sk, skb);
  
        /* Record snd_nxt for loss detection. */
        if (likely(!err))
  rearm_timer:
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  inet_csk(sk)->icsk_rto,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
  
        if (likely(!err))
                NET_INC_STATS_BH(sock_net(sk),
@@@ -2568,7 -2567,7 +2566,7 @@@ begin_fwd
                if (skb == tcp_write_queue_head(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
  }
  
@@@ -3000,7 -2999,7 +2998,7 @@@ int tcp_connect(struct sock *sk
  
        /* Timer for repeating the SYN until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 +                                inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
        return 0;
  }
  EXPORT_SYMBOL(tcp_connect);
@@@ -3079,7 -3078,7 +3077,7 @@@ void tcp_send_ack(struct sock *sk
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                        TCP_DELACK_MAX, TCP_RTO_MAX);
 +                                        TCP_DELACK_MAX, sysctl_tcp_rto_max);
                return;
        }
  
@@@ -3199,8 -3198,8 +3197,8 @@@ void tcp_send_probe0(struct sock *sk
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 -                                        TCP_RTO_MAX);
 +                                        min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
 +                                        sysctl_tcp_rto_max);
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember icsk_probes_out.
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          min(icsk->icsk_rto << icsk->icsk_backoff,
                                              TCP_RESOURCE_PROBE_INTERVAL),
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
        }
  }
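
The rewritten clamp in tcp_select_initial_window() above keeps the same shape but reads the initial window from sysctl_tcp_default_init_rwnd instead of the compile-time TCP_DEFAULT_INIT_RCVWND. With the new default of 20, an mss of 1460 keeps all 20 segments, while a 9000-byte mss is scaled down to max(1460*20/9000, 2) = 3 segments. A standalone restatement of that arithmetic (illustrative, not the kernel function):

/* illustrative mirror of the init_cwnd clamp above for a given mss */
static unsigned int example_init_rwnd(unsigned int mss, unsigned int init_rwnd)
{
        unsigned int init_cwnd = init_rwnd;

        if (mss > 1460) {
                init_cwnd = (1460 * init_rwnd) / mss;   /* e.g. mss 9000 -> 3 */
                if (init_cwnd < 2)
                        init_cwnd = 2;
        }
        return init_cwnd;
}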
diff --combined net/ipv6/addrconf.c
index b67143e621209a84d40fbf97a93e7792d1dcfd6d,d0912acd9522daef6de084df139aa38101913bd7..475a16d7b9557fffdd04732353d5e60d740d3d48
@@@ -173,7 -173,7 +173,7 @@@ static struct ipv6_devconf ipv6_devcon
        .forwarding             = 0,
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
        .mtu6                   = IPV6_MIN_MTU,
 -      .accept_ra              = 1,
 +      .accept_ra              = 1,    
        .accept_redirects       = 1,
        .autoconf               = 1,
        .force_mld_version      = 0,
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
        .rtr_solicit_delay      = MAX_RTR_SOLICITATION_DELAY,
  #ifdef CONFIG_IPV6_PRIVACY
 -      .use_tempaddr           = 0,
 +      .use_tempaddr           = 1,
        .temp_valid_lft         = TEMP_VALID_LIFETIME,
        .temp_prefered_lft      = TEMP_PREFERRED_LIFETIME,
        .regen_max_retry        = REGEN_MAX_RETRY,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_pinfo        = 1,
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI        
 +      .ra_info_flag           = 0,
 +#endif                
  #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -220,7 -216,7 +220,7 @@@ static struct ipv6_devconf ipv6_devconf
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
        .rtr_solicit_delay      = MAX_RTR_SOLICITATION_DELAY,
  #ifdef CONFIG_IPV6_PRIVACY
 -      .use_tempaddr           = 0,
 +      .use_tempaddr           = 1,
        .temp_valid_lft         = TEMP_VALID_LIFETIME,
        .temp_prefered_lft      = TEMP_PREFERRED_LIFETIME,
        .regen_max_retry        = REGEN_MAX_RETRY,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_pinfo        = 1,
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI        
 +      .ra_info_flag           = 0,
 +#endif        
  #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
        .rtr_probe_interval     = 60 * HZ,
        .accept_ra_rt_info_max_plen = 0,
  #endif
  #endif
 +      .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@@ -766,10 -758,9 +766,10 @@@ static int addrconf_fixup_forwarding(st
        } else if ((!newf) ^ (!old))
                dev_forward_change((struct inet6_dev *)table->extra1);
        rtnl_unlock();
 -
 +      
        if (newf)
                rt6_purge_dflt_routers(net);
 +
        return 1;
  }
  #endif
@@@ -1848,10 -1839,6 +1848,10 @@@ static int addrconf_ifid_gre(u8 *eui, s
  
  static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
  {
 +    /* MTK_NET_CHANGES */
 +    if (strncmp(dev->name, "ccmni", 2) == 0)
 +        return -1;
 +              
        switch (dev->type) {
        case ARPHRD_ETHER:
        case ARPHRD_FDDI:
@@@ -1959,31 -1946,6 +1959,31 @@@ static void  __ipv6_try_regen_rndid(str
  }
  #endif
  
 +u32 addrconf_rt_table(const struct net_device *dev, u32 default_table) {
 +      /* Determines into what table to put autoconf PIO/RIO/default routes
 +       * learned on this device.
 +       *
 +       * - If 0, use the same table for every device. This puts routes into
 +       *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
 +       *   (but note that these three are currently all equal to
 +       *   RT6_TABLE_MAIN).
 +       * - If > 0, use the specified table.
 +       * - If < 0, put routes into table dev->ifindex + (-rt_table).
 +       */
 +      struct inet6_dev *idev = in6_dev_get(dev);
 +      u32 table;
 +      int sysctl = idev->cnf.accept_ra_rt_table;
 +      if (sysctl == 0) {
 +              table = default_table;
 +      } else if (sysctl > 0) {
 +              table = (u32) sysctl;
 +      } else {
 +              table = (unsigned) dev->ifindex + (-sysctl);
 +      }
 +      in6_dev_put(idev);
 +      return table;
 +}
 +
  /*
   *    Add prefix route.
   */
@@@ -1993,7 -1955,7 +1993,7 @@@ addrconf_prefix_route(struct in6_addr *
                      unsigned long expires, u32 flags)
  {
        struct fib6_config cfg = {
 -              .fc_table = RT6_TABLE_PREFIX,
 +              .fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@@ -2027,8 -1989,7 +2027,8 @@@ static struct rt6_info *addrconf_get_pr
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
  
 -      table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
 +      table = fib6_get_table(dev_net(dev),
 +                             addrconf_rt_table(dev, RT6_TABLE_PREFIX));
        if (table == NULL)
                return NULL;
  
@@@ -2730,8 -2691,18 +2730,18 @@@ static void init_loopback(struct net_de
                        if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
                                continue;
  
-                       if (sp_ifa->rt)
-                               continue;
+                       if (sp_ifa->rt) {
+                               /* This dst has been added to garbage list when
+                                * lo device down, release this obsolete dst and
+                                * reallocate a new router for ifa.
+                                */
+                               if (sp_ifa->rt->dst.obsolete > 0) {
+                                       ip6_rt_put(sp_ifa->rt);
+                                       sp_ifa->rt = NULL;
+                               } else {
+                                       continue;
+                               }
+                       }
  
                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
  
@@@ -4234,7 -4205,6 +4244,7 @@@ static inline void ipv6_store_devconf(s
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
  #endif
  #endif
 +      array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
  #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
        array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
        array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI        
 +      array[DEVCONF_RA_INFO_FLAG] = cnf->ra_info_flag;
 +#endif        
  }
  
  static inline size_t inet6_ifla6_size(void)
@@@ -4946,13 -4913,6 +4956,13 @@@ static struct addrconf_sysctl_tabl
                },
  #endif
  #endif
 +              {
 +                      .procname       = "accept_ra_rt_table",
 +                      .data           = &ipv6_devconf.accept_ra_rt_table,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
 +              },
                {
                        .procname       = "proxy_ndp",
                        .data           = &ipv6_devconf.proxy_ndp,
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec
                },
 +#ifdef        CONFIG_MTK_DHCPV6C_WIFI 
 +              {
 +                      .procname               = "ra_info_flag",
 +                      .data                   = &ipv6_devconf.ra_info_flag,
 +                      .maxlen                 = sizeof(int),
 +                      .mode                   = 0644,
 +                      .proc_handler   = proc_dointvec
 +              },
 +#endif
                {
                        /* sentinel */
                }
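
The accept_ra_rt_table sysctl registered above selects the routing table for autoconf routes exactly as documented in addrconf_rt_table() earlier in this file: 0 keeps the historical per-type default, a positive value names a table directly, and a negative value spreads routes across per-interface tables at dev->ifindex + (-value). A worked restatement with example inputs (illustrative only; the interface indexes are assumptions):

#include <linux/types.h>

/* illustrative mapping, mirroring addrconf_rt_table() above:
 *   sysctl = 0,     any ifindex -> default_table (e.g. RT6_TABLE_PREFIX)
 *   sysctl = 100,   any ifindex -> table 100
 *   sysctl = -1000, ifindex 5   -> table 1005
 */
static u32 example_rt_table(int sysctl, int ifindex, u32 default_table)
{
        if (sysctl == 0)
                return default_table;
        if (sysctl > 0)
                return (u32)sysctl;
        return (u32)ifindex + (u32)(-sysctl);
}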
diff --combined net/ipv6/tcp_ipv6.c
index f3fa7fa2ae31f438546ba452b41853ac00c5e131,1a87659a61396cf69a1071e34d906b2d5ca4ce51..c27303d4482fdc0264accfd622c5a1b62597ee7c
@@@ -252,7 -252,6 +252,7 @@@ static int tcp_v6_connect(struct sock *
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
 +      fl6.flowi6_uid = sock_i_uid(sk);
  
        final_p = fl6_update_dst(&fl6, np->opt, &final);
  
        if (err)
                goto late_failure;
  
 +        printk(KERN_INFO  "net_sock, IPV6 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
@@@ -793,7 -791,6 +793,7 @@@ static void tcp_v6_send_response(struc
        fl6.flowi6_proto = IPPROTO_TCP;
        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                fl6.flowi6_oif = inet6_iif(skb);
 +      fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@@ -1002,7 -999,6 +1002,7 @@@ static int tcp_v6_conn_request(struct s
                TCP_ECN_create_request(req, skb, sock_net(sk));
  
        treq->iif = sk->sk_bound_dev_if;
 +      inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
  
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
@@@ -1655,6 -1651,7 +1655,7 @@@ static const struct inet_connection_soc
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
  #endif
+       .mtu_reduced       = tcp_v6_mtu_reduced,
  };
  
  #ifdef CONFIG_TCP_MD5SIG
@@@ -1686,6 -1683,7 +1687,7 @@@ static const struct inet_connection_soc
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
  #endif
+       .mtu_reduced       = tcp_v4_mtu_reduced,
  };
  
  #ifdef CONFIG_TCP_MD5SIG
@@@ -1923,7 -1921,6 +1925,6 @@@ struct proto tcpv6_prot = 
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
-       .mtu_reduced            = tcp_v6_mtu_reduced,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
diff --combined net/packet/af_packet.c
index b16a7b83014d755213d231a8b73de07c3c1b3413,81b4b816f13132b8bddc1cd3ca28a7bdad835066..1e8ff53dded4528a3c7ddcce6b56686e95699555
@@@ -565,6 -565,7 +565,7 @@@ static void init_prb_bdqc(struct packet
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
  
+       p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po, tx_ring);
        prb_open_block(p1, pbd);
@@@ -1803,6 -1804,18 +1804,18 @@@ static int tpacket_rcv(struct sk_buff *
                        if ((int)snaplen < 0)
                                snaplen = 0;
                }
+       } else if (unlikely(macoff + snaplen >
+                           GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+               u32 nval;
+               nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+               pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+                           snaplen, nval, macoff);
+               snaplen = nval;
+               if (unlikely((int)snaplen < 0)) {
+                       snaplen = 0;
+                       macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+               }
        }
        spin_lock(&sk->sk_receive_queue.lock);
        h.raw = packet_current_rx_frame(po, skb,
@@@ -3135,25 -3148,19 +3148,25 @@@ packet_setsockopt(struct socket *sock, 
  
                if (optlen != sizeof(val))
                        return -EINVAL;
 -              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
 -                      return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
                switch (val) {
                case TPACKET_V1:
                case TPACKET_V2:
                case TPACKET_V3:
 -                      po->tp_version = val;
 -                      return 0;
 +                      break;
                default:
                        return -EINVAL;
                }
 +              lock_sock(sk);
 +              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
 +                      ret = -EBUSY;
 +              } else {
 +                      po->tp_version = val;
 +                      ret = 0;
 +              }
 +              release_sock(sk);
 +              return ret;
        }
        case PACKET_RESERVE:
        {
                        return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
 +              if (val > INT_MAX)
 +                      return -EINVAL;
                po->tp_reserve = val;
                return 0;
        }
@@@ -3610,7 -3615,6 +3623,7 @@@ static int packet_set_ring(struct sock 
        /* Added to avoid minimal code churn */
        struct tpacket_req *req = &req_u->req;
  
 +      lock_sock(sk);
        /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
        if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
                WARN(1, "Tx-ring is not supported.\n");
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
                if (po->tp_version >= TPACKET_V3 &&
-                   req->tp_block_size <=
-                                               BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+                   (int)(req->tp_block_size -
+                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
                rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
                if (unlikely(rb->frames_per_block <= 0))
                        goto out;
 +              if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
 +                      goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;
                        goto out;
        }
  
 -      lock_sock(sk);
  
        /* Detach socket from network */
        spin_lock(&po->bind_lock);
                if (!tx_ring)
                        prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
        }
 -      release_sock(sk);
  
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
  out:
 +      release_sock(sk);
        return err;
  }