VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 57
+ SUBLEVEL = 58
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -fno-delete-null-pointer-checks
+ -fno-delete-null-pointer-checks \
+ -w
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-
#include "hub.h"
+
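+/* MediaTek IC-USB hooks; extern declarations resolved elsewhere in the MTK USB host stack. */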
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+int is_musbfsh_rh(struct usb_device *udev);
+void set_icusb_sts_disconnect_done(void);
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+static struct usb_device *g_dsda_dev = NULL;
+
+#ifdef CONFIG_PM_RUNTIME
+struct usb_hub *usb11_hub = NULL;
+int is_musbfsh_rh(struct usb_device *udev);
+
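+/* Return the device attached to port 1 of the cached USB1.1 root hub, or NULL. */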
+struct usb_device *get_usb11_child_udev(void)
+{
+	if (usb11_hub) {
+		MYDBG("\n");
+		return usb11_hub->ports[0]->child;
+	} else {
+		MYDBG("\n");
+		return NULL;
+	}
+}
+#endif
+
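+/* Hex-dump a buffer through MYDBG, one byte per line. */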
+void dump_data(char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		MYDBG("data[%d]: %x\n", i, buf[i]);
+}
+
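+/* Read the device descriptor over EP0 of the cached DSDA device and dump the result. */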
+void test_dsda_device_ep0(void)
+{
+	int ret;
+	char *data_buf;
+
+	if (!g_dsda_dev)
+		return;
+
+	/* usb_control_msg() needs a DMA-able buffer, so allocate it from the
+	 * heap rather than using an on-stack array.
+	 */
+	data_buf = kmalloc(256, GFP_KERNEL);
+	if (!data_buf)
+		return;
+
+	ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
+			USB_REQ_GET_DESCRIPTOR,
+			USB_DIR_IN,
+			USB_DT_DEVICE << 8,
+			0,
+			data_buf,
+			64,
+			USB_CTRL_GET_TIMEOUT);
+
+	if (ret < 0) {
+		MYDBG("test ep fail, ret : %d\n", ret);
+	} else {
+		MYDBG("test ep0 ok, ret : %d\n", ret);
+		dump_data(data_buf, ret);
+	}
+
+	kfree(data_buf);
+}
+
+void release_usb11_wakelock(void);
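+/* proc write handler: '0' runs the EP0 test above, '1' releases the USB1.1 wakelock. */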
+static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
+{
+	char cmd[64];
+	int ret;
+
+	/* Clamp the copy so a large write cannot overflow the stack buffer */
+	if (count == 0)
+		return 0;
+	if (count > sizeof(cmd))
+		count = sizeof(cmd);
+
+	ret = copy_from_user(cmd, user_buffer, count);
+	if (ret != 0)
+		return -EFAULT;
+
+	/* apply action here */
+	if (cmd[0] == '0') {
+		MYDBG("");
+		test_dsda_device_ep0();
+	}
+	if (cmd[0] == '1') {
+		MYDBG("");
+		release_usb11_wakelock();
+	}
+
+	MYDBG("");
+
+	return count;
+}
+
+struct file_operations dsda_tmp_proc_fops = {
+ .write = dsda_tmp_proc_entry
+};
+
+
+void create_dsda_tmp_entry(void)
+{
+	struct proc_dir_entry *prEntry;
+
+	MYDBG("");
+
+	prEntry = proc_create("DSDA_TMP_ENTRY", 0660, NULL, &dsda_tmp_proc_fops);
+	if (prEntry)
+		MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
+	else
+		MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
+}
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+extern int usbif_u3h_send_event(char *event);
+#include "otg_whitelist.h"
+#endif
+
+
static inline int hub_is_superspeed(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
+static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
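+/* Control pipes addressed to the default device address 0, used before the device has been given an address. */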
+#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
+#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
+
+
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeed(hub->hdev))
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
+ MYDBG("");
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
unsigned delay;
/* Continue a partial initialization */
- if (type == HUB_INIT2)
- goto init2;
- if (type == HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ device_lock(hub->intfdev);
+
+ /* Was the hub disconnected while we were waiting? */
+ if (hub->disconnected) {
+ device_unlock(hub->intfdev);
+ kref_put(&hub->kref, hub_release);
+ return;
+ }
+ if (type == HUB_INIT2)
+ goto init2;
+
goto init3;
+ }
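+	/* This reference is dropped by the kref_put() in the disconnected
+	 * check above or by the one at the end of this function.
+	 */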
+ kref_get(&hub->kref);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
schedule_delayed_work(&hub->init_work,
msecs_to_jiffies(delay));
+ device_unlock(hub->intfdev);
return; /* Continues at init3: below */
} else {
msleep(delay);
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+ if (type == HUB_INIT2 || type == HUB_INIT3)
+ device_unlock(hub->intfdev);
+
+ kref_put(&hub->kref, hub_release);
}
/* Implement the continuations for the delays above */
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
- } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
+	} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {	/* bus powered */
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
- if (remaining < hdev->maxchild * unit_load)
+		if (remaining < hdev->maxchild * unit_load) {
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
+ }
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
struct usb_device *hdev;
struct usb_hub *hub;
+
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
+#endif
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("HUB_NOT_SUPPORTED");
+#endif
return -ENODEV;
}
#endif
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- wakeup = udev->actconfig->desc.bmAttributes
- & USB_CONFIG_ATT_WAKEUP;
+ wakeup = (udev->quirks &
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
+ udev->actconfig->desc.bmAttributes &
+ USB_CONFIG_ATT_WAKEUP;
else
wakeup = 0;
}
struct usb_device *udev = *pdev;
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
+	struct timeval tv_begin, tv_end;
+	struct timeval tv_before, tv_after;
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+	int is_icusb_rh;
+#endif
+
+	do_gettimeofday(&tv_begin);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+	is_icusb_rh = is_musbfsh_rh(udev->parent);
+#endif
+
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+
+ do_gettimeofday(&tv_before);
usb_disable_device(udev, 0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port_dev->did_runtime_put = false;
}
+ do_gettimeofday(&tv_before);
usb_remove_ep_devs(&udev->ep0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
+ do_gettimeofday(&tv_before);
device_del(&udev->dev);
+ do_gettimeofday(&tv_after);
+ MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
hub_free_dev(udev);
put_device(&udev->dev);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ if (is_icusb_rh)
+ {
+ set_icusb_sts_disconnect_done();
+ MYDBG("ICUSB Disconnect\n");
+ }
+#endif
+ do_gettimeofday(&tv_end);
+ MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+	if (udev->parent) {	/* no need to check the root hub itself */
+ if (!is_targeted(udev)) {
+ usbif_u3h_send_event("DEV_NOT_SUPPORTED");
+ err = -ENOTSUPP;
+ }
+ }
+#endif
+
if (err < 0)
return err;
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
+ MYDBG("udev :%p\n", udev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+	if (is_musbfsh_rh(udev->parent)) {
+		MYDBG("\n");
+		/* find the struct usb_hub for this root hub and hook it */
+		usb11_hub = usb_hub_to_struct_hub(udev->parent);
+	}
+#endif
+#endif
}
/* Tell the runtime-PM framework the device is active */
msleep(delay);
/* read and decode port status */
+ MYDBG("");
ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ MYDBG("");
if (ret < 0)
return ret;
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
+ MYDBG("");
if (status == -ENODEV) {
+ MYDBG("");
; /* The hub is gone */
} else if (status) {
+ MYDBG("");
dev_err(hub->intfdev,
"cannot %sreset port %d (err = %d)\n",
warm ? "warm " : "", port1, status);
} else {
+ MYDBG("");
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
- if (status && status != -ENOTCONN && status != -ENODEV)
+			if (status && status != -ENOTCONN) {
+ MYDBG("");
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
+ }
}
+ MYDBG("");
/* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+ MYDBG("");
hub_port_finish_reset(hub, port1, udev, &status);
+ MYDBG("");
if (!hub_is_superspeed(hub->hdev))
goto done;
warm = true;
}
}
+ MYDBG("");
dev_dbg (hub->intfdev,
"port %d not enabled, trying %sreset again...\n",
port1, warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
+ MYDBG("");
+
dev_err (hub->intfdev,
"Cannot enable port %i. Maybe the USB cable is bad?\n",
done:
if (!hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
up_read(&ehci_cf_port_reset_rwsem);
+ }
+
+ MYDBG("");
return status;
}
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
+ {
+ MYDBG("");
goto err_wakeup;
+ }
}
}
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
+#if 0 /* behavior for kernel 3.10 */
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
- else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
- else {
+ } else {
really_suspend = false;
status = 0;
}
+#else	/* roll back behavior to kernel 3.4 */
+	} else {
+ MYDBG("");
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
+ }
+#endif
+
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
+ MYDBG("");
/* Try to enable USB3 LPM and LTM again */
usb_unlocked_enable_lpm(udev);
*/
if (status == 0) {
devstatus = 0;
+ MYDBG("\n");
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ MYDBG("%d\n", status);
if (status >= 0)
status = (status > 0 ? 0 : -ENODEV);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
const char *speed;
int devnum = udev->devnum;
+ dump_stack();
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ MYDBG("");
retval = hub_port_reset(hub, port1, udev, delay, false);
+ MYDBG("");
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
default:
goto fail;
}
+ MYDBG("");
if (udev->speed == USB_SPEED_WIRELESS)
speed = "variable speed Wireless";
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ MYDBG("");
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
}
if (r == 0)
break;
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (buf->bMaxPacketSize0 == 0) {
+ usbif_u3h_send_event("DEV_CONN_TMOUT");
+ }
+#endif
+
}
udev->descriptor.bMaxPacketSize0 =
buf->bMaxPacketSize0;
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
remaining -= delta;
}
if (remaining < 0) {
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub->intfdev, "%dmA over power budget!\n",
- remaining);
remaining = 0;
int status, i;
unsigned unit_load;
+ MYDBG("");
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
port1, portstatus, portchange, portspeed(hub, portstatus));
}
/* reset (non-USB 3.0 devices) and get descriptor */
+ MYDBG("");
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
+ {
+ MYDBG("");
goto loop;
+ }
+ MYDBG("");
usb_detect_quirks(udev);
if (udev->quirks & USB_QUIRK_DELAY_INIT)
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
hub->ports[port1 - 1]->child = NULL;
spin_unlock_irq(&device_state_lock);
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ g_dsda_dev = udev;
+ MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
+#endif
}
if (status)
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
}
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
dev_dbg (hub_dev, "resetting for error %d\n",
hub->error);
+ MYDBG("");
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg (hub_dev,
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
.supports_autosuspend = 1,
};
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
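+/* Hands khubd's event list and lock over to the MTK dual-mode xHCI code. */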
+extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head *list);
+#endif
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
return -1;
}
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
+#endif
+
khubd_task = kthread_run(hub_thread, NULL, "khubd");
if (!IS_ERR(khubd_task))
return 0;
int i, ret = 0;
int port1 = udev->portnum;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor)) {
dev_info(&udev->dev, "device firmware changed\n");
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
return 0;
-
+
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax);
+ void (*mtu_reduced)(struct sock *sk);
};
/** inet_connection_sock - INET connection oriented sock
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
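+	/* Set by the MTK per-uid retransmit handling (see tcp_v4_handle_retrans_time_by_uid()). */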
+ __u32 icsk_MMSRB;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int limit)
{
if (sk_rcvqueues_full(sk, skb, limit))
+ {
+#ifdef CONFIG_MTK_NET_LOGGING
+		printk(KERN_ERR "[mtk_net][sock]sk_add_backlog->sk_rcvqueues_full sk->sk_rcvbuf:%d,sk->sk_sndbuf:%d\n", sk->sk_rcvbuf, sk->sk_sndbuf);
+#endif
return -ENOBUFS;
-
+ }
__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
struct sk_buff *skb);
void (*release_cb)(struct sock *sk);
- void (*mtu_reduced)(struct sock *sk);
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
*/
#define MAX_TCP_WINDOW 32767U
-/* Offer an initial receive window of 10 mss. */
-#define TCP_DEFAULT_INIT_RCVWND 10
+/* Offer an initial receive window of 20 mss. */
+#define TCP_DEFAULT_INIT_RCVWND 20
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS 88U
* to ~3sec-8min depending on RTO.
*/
-#define TCP_RETR2 15 /*
+#define TCP_RETR2 10 /*
* This should take at least
* 90 minutes to time out.
* RFC1122 says that the limit is 100 sec.
* 15 is ~13-30min depending on RTO.
*/
-#define TCP_SYN_RETRIES 6 /* This is how many retries are done
+#define TCP_SYN_RETRIES 9 /* This is how many retries are done
* when active opening a connection.
* RFC1122 says the minimum retry MUST
* be at least 180secs. Nevertheless
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
-#define TCP_RTO_MAX ((unsigned)(120*HZ))
+#define TCP_RTO_MAX ((unsigned)(60*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
+extern int sysctl_tcp_default_init_rwnd;
extern int sysctl_tcp_min_tso_segs;
-
+extern int sysctl_tcp_rto_min;
+extern int sysctl_tcp_rto_max;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
*/
extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void tcp_v4_mtu_reduced(struct sock *sk);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
struct request_sock *req,
static inline void tcp_bound_rto(const struct sock *sk)
{
- if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
- inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+ if (inet_csk(sk)->icsk_rto > sysctl_tcp_rto_max)
+ inet_csk(sk)->icsk_rto = sysctl_tcp_rto_max;
}
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
static inline u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = sysctl_tcp_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
if (!tp->packets_out && !icsk->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
/* See RFC 2012 */
TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, sysctl_tcp_rto_min*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, sysctl_tcp_rto_max*1000/HZ);
TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
{
if (sk->sk_send_head == skb_unlinked)
sk->sk_send_head = NULL;
+ if (tcp_sk(sk)->highest_sack == skb_unlinked)
+ tcp_sk(sk)->highest_sack = NULL;
}
static inline void tcp_init_send_head(struct sock *sk)
loff_t last_pos;
};
+/* MTK_NET_CHANGES */
+/*
+ * reset tcp connection by uid
+ */
+struct uid_err {
+ int appuid;
+ int errNum;
+};
+
extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+/* MTK_NET_CHANGES */
+extern void tcp_v4_reset_connections_by_uid(struct uid_err uid_e);
+extern void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e);
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
-
+ printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
* It can be called through tcp_release_cb() if socket was owned by user
* at the time tcp_v4_err() was called to handle ICMP message.
*/
- static void tcp_v4_mtu_reduced(struct sock *sk)
+ void tcp_v4_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
}
+ EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
+ remaining, sysctl_tcp_rto_max);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
* because it's been added to the accept queue directly.
*/
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
/* Add the child socket directly into the accept queue */
inet_csk_reqsk_queue_add(sk, req, child);
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
return true;
}
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
+ .mtu_reduced = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_init_sock(sk);
+ icsk->icsk_MMSRB = 0;
icsk->icsk_af_ops = &ipv4_specific;
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
+void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+	struct inet_connection_sock *icsk = NULL;
+
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+			if (sk->sk_socket) {
+				if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+					continue;
+				else
+					printk(KERN_INFO "[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!\n",
+						SOCK_INODE(sk->sk_socket)->i_uid);
+			} else {
+				continue;
+			}
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+			/* update this socket's retransmit timer and RTO */
+			icsk = inet_csk(sk);
+			printk(KERN_INFO "[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
+
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
+ icsk->icsk_rto = sysctl_tcp_rto_min * 30;
+ icsk->icsk_MMSRB = 1;
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ spin_lock_bh(lock);
+ sock_put(sk);
+
+ }
+ spin_unlock_bh(lock);
+ }
+
+}
+
+
+/*
+ * tcp_v4_reset_connections_by_uid - destroy all sockets owned by a specific uid
+ */
+void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+			if (sk->sk_socket) {
+				if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+					continue;
+				else
+					printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!\n",
+						SOCK_INODE(sk->sk_socket)->i_uid);
+			} else {
+				continue;
+			}
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = uid_e.errNum;
+ printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+}
+
+
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv,
.release_cb = tcp_release_cb,
- .mtu_reduced = tcp_v4_mtu_reduced,
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
}
/* Set initial window to a value enough for senders starting with
- * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * initial congestion window of sysctl_tcp_default_init_rwnd. Place
* a limit on the initial window when mss is larger than 1460.
*/
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ int init_cwnd = sysctl_tcp_default_init_rwnd;
if (mss > 1460)
- init_cwnd =
- max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
__sock_put(sk);
}
if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
- sk->sk_prot->mtu_reduced(sk);
+ inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
__sock_put(sk);
}
}
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
goto rearm_timer;
- /* Probe with zero data doesn't trigger fast recovery. */
- if (skb->len > 0)
- err = __tcp_retransmit_skb(sk, skb);
+ err = __tcp_retransmit_skb(sk, skb);
/* Record snd_nxt for loss detection. */
if (likely(!err))
rearm_timer:
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
/* Timer for repeating the SYN until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
return 0;
}
EXPORT_SYMBOL(tcp_connect);
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
return;
}
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
.forwarding = 0,
.hop_limit = IPV6_DEFAULT_HOPLIMIT,
.mtu6 = IPV6_MIN_MTU,
- .accept_ra = 1,
+ .accept_ra = 1,
.accept_redirects = 1,
.autoconf = 1,
.force_mld_version = 0,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
- .use_tempaddr = 0,
+ .use_tempaddr = 1,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ .ra_info_flag = 0,
+#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
- .use_tempaddr = 0,
+ .use_tempaddr = 1,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ .ra_info_flag = 0,
+#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
} else if ((!newf) ^ (!old))
dev_forward_change((struct inet6_dev *)table->extra1);
rtnl_unlock();
-
+
if (newf)
rt6_purge_dflt_routers(net);
+
return 1;
}
#endif
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
+ /* MTK_NET_CHANGES */
+ if (strncmp(dev->name, "ccmni", 2) == 0)
+ return -1;
+
switch (dev->type) {
case ARPHRD_ETHER:
case ARPHRD_FDDI:
}
#endif
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+ /* Determines into what table to put autoconf PIO/RIO/default routes
+ * learned on this device.
+ *
+ * - If 0, use the same table for every device. This puts routes into
+ * one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+ * (but note that these three are currently all equal to
+ * RT6_TABLE_MAIN).
+ * - If > 0, use the specified table.
+ * - If < 0, put routes into table dev->ifindex + (-rt_table).
+ */
+	struct inet6_dev *idev = in6_dev_get(dev);
+	u32 table;
+	int sysctl = idev ? idev->cnf.accept_ra_rt_table : 0;
+
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	if (idev)
+		in6_dev_put(idev);
+	return table;
+}
+
/*
* Add prefix route.
*/
unsigned long expires, u32 flags)
{
struct fib6_config cfg = {
- .fc_table = RT6_TABLE_PREFIX,
+ .fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
.fc_metric = IP6_RT_PRIO_ADDRCONF,
.fc_ifindex = dev->ifindex,
.fc_expires = expires,
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+ table = fib6_get_table(dev_net(dev),
+ addrconf_rt_table(dev, RT6_TABLE_PREFIX));
if (table == NULL)
return NULL;
if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
continue;
- if (sp_ifa->rt)
- continue;
+ if (sp_ifa->rt) {
+ /* This dst has been added to garbage list when
+ * lo device down, release this obsolete dst and
+ * reallocate a new router for ifa.
+ */
+ if (sp_ifa->rt->dst.obsolete > 0) {
+ ip6_rt_put(sp_ifa->rt);
+ sp_ifa->rt = NULL;
+ } else {
+ continue;
+ }
+ }
sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
#endif
#endif
+ array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ array[DEVCONF_RA_INFO_FLAG] = cnf->ra_info_flag;
+#endif
}
static inline size_t inet6_ifla6_size(void)
},
#endif
#endif
+ {
+ .procname = "accept_ra_rt_table",
+ .data = &ipv6_devconf.accept_ra_rt_table,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{
.procname = "proxy_ndp",
.data = &ipv6_devconf.proxy_ndp,
.mode = 0644,
.proc_handler = proc_dointvec
},
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ {
+ .procname = "ra_info_flag",
+ .data = &ipv6_devconf.ra_info_flag,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
{
/* sentinel */
}
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
+ fl6.flowi6_uid = sock_i_uid(sk);
final_p = fl6_update_dst(&fl6, np->opt, &final);
if (err)
goto late_failure;
+ printk(KERN_INFO "net_sock, IPV6 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
fl6.flowi6_proto = IPPROTO_TCP;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = inet6_iif(skb);
+ fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
TCP_ECN_create_request(req, skb, sock_net(sk));
treq->iif = sk->sk_bound_dev_if;
+ inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
+ .mtu_reduced = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
+ .mtu_reduced = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb,
- .mtu_reduced = tcp_v6_mtu_reduced,
.hash = tcp_v6_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
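+	/* Largest frame that still fits in a block after the per-block private area. */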
+ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
prb_setup_retire_blk_timer(po, tx_ring);
prb_open_block(p1, pbd);
if ((int)snaplen < 0)
snaplen = 0;
}
+ } else if (unlikely(macoff + snaplen >
+ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+ u32 nval;
+
+ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+ snaplen, nval, macoff);
+ snaplen = nval;
+ if (unlikely((int)snaplen < 0)) {
+ snaplen = 0;
+ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+ }
}
spin_lock(&sk->sk_receive_queue.lock);
h.raw = packet_current_rx_frame(po, skb,
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
- po->tp_version = val;
- return 0;
+ break;
default:
return -EINVAL;
}
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_version = val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_RESERVE:
{
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
po->tp_reserve = val;
return 0;
}
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
+ lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
goto out;
if (po->tp_version >= TPACKET_V3 &&
- req->tp_block_size <=
- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+ (int)(req->tp_block_size -
+ BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
goto out;
}
- lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
}
- release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
+ release_sock(sk);
return err;
}