VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 94
+ SUBLEVEL = 95
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-Werror-implicit-function-declaration \
-Wno-format-security \
-fno-delete-null-pointer-checks \
- -std=gnu89
+ -w -std=gnu89
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
#include "cdc-acm.h"
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+
+/* set to 1 to work around a musb bug triggered by many writes with a clean urb */
+#undef ACM_NW
+#define ACM_NW 1
+/* set SZ to hsmaxp*20 so the tty line discipline accepts large writes */
+#define ACM_WB_SZ (512*20)
+
+#define DATA_DUMP_BYTES 15 // wx, how many bytes we'll print out for each packet
+#define DATA_DUMP_SIZE 64 // wx, should be large enough to hold DATA_DUMP_DIGITS
+static unsigned char data_out[DATA_DUMP_SIZE];
+static unsigned char data_in[DATA_DUMP_SIZE];
+/* Debug functions */
+static int enable_debug = 0;
+static int enable_dump = 0;
+// original CDC-ACM log, more detail
+#undef dev_dbg
+#define dev_dbg(dev, format, args...) \
+ do{ \
+ if(enable_debug) { \
+ dev_printk(KERN_WARNING, dev, "[CDC-ACM] " format, ##args); \
+ } \
+ }while(0)
+#undef dev_vdbg
+#define dev_vdbg dev_dbg
+//MTK added CDC-ACM log, more critical
+#define dbg_mtk(dev, format, args...) \
+ do{ \
+ dev_printk(KERN_WARNING, dev, "[CDC-ACM-MTK] " format "\n", ##args); \
+ }while(0)
+#else
+#define dbg_mtk(dev, format, args...) do{}while(0)
+#endif
+
#define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek, Johan Hovold"
#define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters"
static DEFINE_MUTEX(acm_table_lock);
+#define MYDBG(fmt, args...) do {printk(KERN_WARNING "MTK_ACM, <%s(), %d> " fmt, __func__, __LINE__, ## args); }while(0)
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+
+#define RECORD_SZ 100
+#define DUMP_DELAY 3
+struct record_entry
+{
+ struct timeval cur_time;
+ u32 transfer_buffer_length;
+ u32 actual_length;
+ int status;
+ unsigned char data;
+
+}w1[RECORD_SZ], w2[RECORD_SZ], r1[RECORD_SZ], r2[RECORD_SZ];
+
+static int w1_idx, w2_idx, r1_idx, r2_idx;
+static int record_enable = 0;
+static struct timeval tv_start;
+/*
+ * Dump the four URB activity rings (w1 = write submits, w2 = write
+ * completions, r1 = read submits, r2 = read completions) collected by
+ * record_activity() to the kernel log.  Each ring is printed up to its
+ * current fill index; timestamps are relative to tv_start (captured when
+ * recording was enabled).  The mdelay(DUMP_DELAY) between lines paces the
+ * output so a slow console does not drop messages.
+ */
+void dump_record(void)
+{
+ int i, index_limit;
+ struct record_entry *ptr;
+
+ /* write-submit ring: only request length and first data byte recorded */
+ index_limit = w1_idx;
+ ptr = w1;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("w1, time:(%d,%d), reqlen:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, (unsigned int)ptr[i].transfer_buffer_length, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+ /* read-submit ring: no data byte (nothing received yet at submit time) */
+ index_limit = r1_idx;
+ ptr = r1;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("r1, time:(%d,%d), reqlen:%d\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length);
+ mdelay(DUMP_DELAY);
+ }
+
+ /* write-completion ring: includes actual length and completion status */
+ index_limit = w2_idx;
+ ptr = w2;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("w2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+ /* read-completion ring: same layout as w2 */
+ index_limit = r2_idx;
+ ptr = r2;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("r2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+}
+/*
+ * Record one URB event into the appropriate ring buffer for later dumping
+ * by dump_record().  Routing: is_in selects read (r*) vs write (w*) rings;
+ * is_complete selects the completion ring (r2/w2) vs the submit ring
+ * (r1/w1).  No-op unless record_enable is set.  Each ring stops recording
+ * once it reaches RECORD_SZ entries (no wraparound).
+ *
+ * NOTE(review): the tv_usec subtraction below is not normalized, so the
+ * logged usec component can be negative when the current usec value is
+ * smaller than tv_start's — confirm this is acceptable for the debug logs.
+ * NOTE(review): completion paths dereference urb->transfer_buffer for the
+ * first data byte; presumably it is always non-NULL here — verify.
+ */
+void record_activity(struct urb *urb, int is_in, int is_complete)
+{
+ struct timeval tv_time;
+
+ if(!record_enable)
+ return;
+
+ /* timestamp relative to when recording was switched on */
+ do_gettimeofday(&tv_time);
+ tv_time.tv_sec = tv_time.tv_sec - tv_start.tv_sec;
+ tv_time.tv_usec = tv_time.tv_usec - tv_start.tv_usec;
+ if(is_in){
+ if(is_complete){
+ /* read completion -> r2 */
+ if(r2_idx >= RECORD_SZ)
+ return;
+
+ r2[r2_idx].cur_time = tv_time;
+ r2[r2_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ r2[r2_idx].actual_length = urb->actual_length;
+ r2[r2_idx].status = urb->status;
+ r2[r2_idx].data = *((unsigned char*)urb->transfer_buffer);
+ r2_idx++;
+ return;
+ }
+ /* read submit -> r1 (no data byte captured at submit time) */
+ if(r1_idx >= RECORD_SZ)
+ return;
+ r1[r1_idx].cur_time = tv_time;
+ r1[r1_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ r1[r1_idx].actual_length = urb->actual_length;
+ r1[r1_idx].status = urb->status;
+ r1_idx++;
+ }else{
+ if(is_complete){
+ /* write completion -> w2 */
+ if(w2_idx >= RECORD_SZ)
+ return;
+ w2[w2_idx].cur_time = tv_time;
+ w2[w2_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ w2[w2_idx].actual_length = urb->actual_length;
+ w2[w2_idx].status = urb->status;
+ w2[w2_idx].data = *((unsigned char*)urb->transfer_buffer);
+ w2_idx++;
+ return;
+ }
+ /* write submit -> w1 */
+ if(w1_idx >= RECORD_SZ)
+ return;
+ w1[w1_idx].cur_time = tv_time;
+ w1[w1_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ w1[w1_idx].actual_length = urb->actual_length;
+ w1[w1_idx].status = urb->status;
+ w1[w1_idx].data = *((unsigned char*)urb->transfer_buffer);
+ w1_idx++;
+ }
+}
+
+/*
+ * Return true when no ACM device is currently bound, i.e. every slot of
+ * acm_table[] (indexed by tty minor) is NULL.  Exported so other kernel
+ * modules can check whether the host ACM driver is fully idle.
+ *
+ * NOTE(review): acm_table is read without holding acm_table_lock —
+ * presumably a racy snapshot is acceptable for this check; verify.
+ */
+bool usb_h_acm_all_clear(void)
+{
+ int i;
+ int count = 0;
+ for (i = 0; i < ACM_TTY_MINORS; i++) {
+ if(acm_table[i] != NULL) {
+ count++;
+ }
+ }
+ MYDBG("count<%d>\n", count);
+ return !count;
+}
+EXPORT_SYMBOL_GPL(usb_h_acm_all_clear);
+
+#define CHECK_INTERVAL 2
+#define CB_NUM 3
+extern unsigned long volatile jiffies;
+static unsigned long callback_check_timeout[CB_NUM];
+static char *callback_name[CB_NUM] = {
+ "acm_read_bulk_callback",
+ "acm_write_bulk",
+ "acm_ctrl_irq",
+};
+/*
+ * Rate-limited "heartbeat" log for the three URB callbacks listed in
+ * callback_name[].  Looks up func_name in that table and, at most once per
+ * CHECK_INTERVAL seconds per callback (or on every call when enable_debug
+ * is set), logs the endpoint address, actual/requested lengths, first data
+ * byte, status and minor of the URB that just fired.
+ *
+ * NOTE(review): dereferences urb->transfer_buffer unconditionally —
+ * presumably non-NULL for these bulk/interrupt callbacks; verify.
+ */
+void mark_callback_alive(char *func_name, struct urb *urb, struct acm *acm)
+{
+
+
+ int i;
+ for(i = 0; i < CB_NUM ; i++)
+ {
+ if(!strcmp(func_name, callback_name[i])){
+ /* log when debugging is forced on, or the per-callback
+ * rate-limit window has expired */
+ if(enable_debug || time_after(jiffies, callback_check_timeout[i]))
+ {
+ MYDBG("%s,ep(%d),len(%d,%d),data(%x),sts(%d), minor(%d)\n",
+ func_name,
+ urb->ep->desc.bEndpointAddress,
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ *((unsigned char*)urb->transfer_buffer),
+ urb->status,
+ acm->minor);
+ callback_check_timeout[i] = jiffies + HZ * CHECK_INTERVAL;
+ }
+ break;
+ }
+ }
+}
+
+
+#endif
+
/*
* acm_table accessors
*/
wb->urb->transfer_buffer_length = wb->len;
wb->urb->dev = acm->dev;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(wb->urb, 0, 0);
+#endif
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
dev_err(&acm->data->dev,
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dev_dbg(&acm->control->dev,
+ dev_err(&acm->control->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
- dev_dbg(&acm->control->dev,
+ dev_err(&acm->control->dev,
"%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_mark_last_busy(acm->dev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
{
int res;
- if (!test_and_clear_bit(index, &acm->read_urbs_free))
+ if (!test_and_clear_bit(index, &acm->read_urbs_free)){
return 0;
+ }
dev_vdbg(&acm->data->dev, "%s - urb %d\n", __func__, index);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(acm->read_urbs[index], 1, 0);
+#endif
+
res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
+ MYDBG("urb fail(%d)\n", res);
if (res != -EPERM) {
dev_err(&acm->data->dev,
"%s - usb_submit_urb failed: %d\n",
for (i = 0; i < acm->rx_buflimit; ++i) {
res = acm_submit_read_urb(acm, i, mem_flags);
- if (res)
+ if (res){
return res;
+ }
}
return 0;
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
- if (!urb->actual_length)
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i, len;
+#endif
+ if (!urb->actual_length){
return;
+ }
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ len = sprintf(data_in, "DT-I: ");
+ for(i=0; i<urb->actual_length && i<DATA_DUMP_BYTES; i++) {
+ len += sprintf(data_in+len, "%02X ", *(((unsigned char *)(urb->transfer_buffer))+i));
+ }
+ sprintf(data_in+len, "\n");
+ dbg_mtk(&acm->data->dev, "%s", data_in);
+ }
+#endif
tty_insert_flip_string(&acm->port, urb->transfer_buffer,
urb->actual_length);
rb->index, urb->actual_length);
set_bit(rb->index, &acm->read_urbs_free);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(urb, 1, 1);
+#endif
+
if (!acm->dev) {
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
usb_mark_last_busy(acm->dev);
if (urb->status) {
- dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
+ dev_err(&acm->data->dev, "%s - non-zero urb status: %d\n",
__func__, urb->status);
return;
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
acm_process_read_urb(acm, urb);
/* throttle device if requested by tty */
struct acm *acm = wb->instance;
unsigned long flags;
- if (urb->status || (urb->actual_length != urb->transfer_buffer_length))
- dev_vdbg(&acm->data->dev, "%s - len %d/%d, status %d\n",
+
+ if (urb->status || (urb->actual_length != urb->transfer_buffer_length)){
+ dev_err(&acm->data->dev, "%s - len %d/%d, status %d, data(%x)\n",
__func__,
urb->actual_length,
urb->transfer_buffer_length,
- urb->status);
+ urb->status,
+ *((char *)(urb->transfer_buffer)));
+ }
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(urb, 0, 1);
+#endif
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
return retval;
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+extern struct usb_device *get_usb11_child_udev(void);
+extern int usb_autoresume_device(struct usb_device *udev);
+#endif
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
- struct acm *acm = tty->driver_data;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ struct usb_device *udev;
+ int result;
+#endif
+ struct acm *acm;
+ acm = tty->driver_data;
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
+ MYDBG("ctrl:%x, read:%x, write:%x\n",
+ (acm->control->cur_altsetting->endpoint[0].desc).bEndpointAddress,
+ (acm->data->cur_altsetting->endpoint[0].desc).bEndpointAddress,
+ (acm->data->cur_altsetting->endpoint[1].desc).bEndpointAddress);
+
+#define META_BIN_NAME "meta_tst"
+#define MDDOWNLOADER_BIN_NAME "downloader"
+
+ /* make sure usb1 is always alive in dl/meta mode */
+ if(!strcmp(META_BIN_NAME, current->comm) || !strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ udev = get_usb11_child_udev();
+ result = usb_autoresume_device(udev);
+ dbg_mtk(&acm->control->dev, "%s, auto result:%d", __func__, result);
+ }
+
+ if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ record_enable = 1;
+ w1_idx = w2_idx = r1_idx = r2_idx = 0;
+ do_gettimeofday(&tv_start);
+ }
+
+#else
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+#endif
dev_dbg(tty->dev, "%s\n", __func__);
return tty_port_open(&acm->port, tty, filp);
*/
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
acm->control->needs_remote_wakeup = 1;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+ acm->control->needs_remote_wakeup = 0;
+#endif
+#endif
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
{
struct acm *acm = tty->driver_data;
dev_dbg(&acm->control->dev, "%s\n", __func__);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
+ if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ record_enable = 0;
+ dump_record();
+ }
+#endif
tty_port_close(&acm->port, tty, filp);
}
unsigned long flags;
int wbn;
struct acm_wb *wb;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i, len;
+#endif
if (!count)
return 0;
dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ len = sprintf(data_out, "DT-O: ");
+ for(i=0; i<count && i<DATA_DUMP_BYTES; i++) {
+ len += sprintf(data_out+len, "%02X ", *(buf+i));
+ }
+ sprintf(data_out+len, "\n");
+ dbg_mtk(&acm->data->dev, "%s", data_out);
+ }
+#endif
spin_lock_irqsave(&acm->write_lock, flags);
wbn = acm_wb_alloc(acm);
if (wbn < 0) {
{
struct acm *acm = tty->driver_data;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ dbg_mtk(&acm->control->dev, "tiocmget ctrlin=%x\n", acm->ctrlin);
+ }
+#endif
return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
(acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
(acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
struct acm *acm = tty->driver_data;
unsigned int newctrl;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ dbg_mtk(&acm->control->dev, "tiocmset ctrlout=%x\n", acm->ctrlout);
+ }
+#endif
newctrl = acm->ctrlout;
set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
acm->combined_interfaces = combined_interfaces;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ acm->writesize = ACM_WB_SZ;
+#else
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
+#endif
acm->control = control_interface;
acm->data = data_interface;
acm->minor = minor;
struct acm *acm = usb_get_intfdata(intf);
int cnt;
+ dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
spin_lock_irq(&acm->read_lock);
spin_lock(&acm->write_lock);
if (PMSG_IS_AUTO(message)) {
spin_unlock_irq(&acm->read_lock);
return -EBUSY;
}
+ }else{
+ int i;
+ for (i = 0; i < ACM_NW; i++){
+ if(acm->wb[i].use){
+ spin_unlock(&acm->write_lock);
+ spin_unlock_irq(&acm->read_lock);
+ return -EBUSY;
+ }
+ }
}
+
cnt = acm->susp_count++;
spin_unlock(&acm->write_lock);
spin_unlock_irq(&acm->read_lock);
struct urb *urb;
int rv = 0;
+ dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
+
spin_lock_irq(&acm->read_lock);
spin_lock(&acm->write_lock);
* delayed error checking because we must
* do the write path at all cost
*/
- if (rv < 0)
+ if (rv < 0){
+ MYDBG("urb fail:%d\n", rv);
goto out;
+ }
rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
}
},
#endif
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
static int __init acm_init(void)
{
int retval;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i;
+#endif
acm_tty_driver = alloc_tty_driver(ACM_TTY_MINORS);
if (!acm_tty_driver)
return -ENOMEM;
acm_tty_driver->init_termios = tty_std_termios;
acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
HUPCL | CLOCAL;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ /* wx, disable echo and other flags in the very beginning.
+ * otherwise RILD will disable them via calling tcsetattr() after it opened tty port,
+ * so there may be a gap between port opening and calling tcsetattr(). If modem send data
+ * at that time, things goes ugly.
+ */
+ acm_tty_driver->init_termios.c_iflag = 0;
+ acm_tty_driver->init_termios.c_oflag = 0;
+ acm_tty_driver->init_termios.c_lflag = 0;
+ for(i= 0; i < CB_NUM ; i++){
+ callback_check_timeout[i] = jiffies;
+ }
+#endif
tty_set_operations(acm_tty_driver, &acm_ops);
retval = tty_register_driver(acm_tty_driver);
module_init(acm_init);
module_exit(acm_exit);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+module_param(enable_debug, int, 0644);
+module_param(enable_dump, int, 0644);
+#endif
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-
#include "hub.h"
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+int is_musbfsh_rh(struct usb_device *udev);
+void set_icusb_sts_disconnect_done(void);
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+static struct usb_device *g_dsda_dev = NULL;
+
+#ifdef CONFIG_PM_RUNTIME
+struct usb_hub *usb11_hub = NULL;
+int is_musbfsh_rh(struct usb_device *udev);
+
+/*
+ * Return the usb_device attached to port 0 of the usb11 root hub, or NULL
+ * if the hub pointer has not been hooked yet (usb11_hub is assigned during
+ * enumeration when a musbfsh root-hub child is configured).
+ */
+struct usb_device *get_usb11_child_udev(void)
+{
+ if(usb11_hub){
+ MYDBG("\n");
+ return usb11_hub->ports[0]->child;
+ }else{
+ MYDBG("\n");
+ return NULL;
+ }
+}
+#endif
+
+/*
+ * Log the first len bytes of buf, one MYDBG line per byte.  Debug helper
+ * for inspecting control-transfer payloads (see test_dsda_device_ep0()).
+ */
+void dump_data(char *buf, int len)
+{
+ int i;
+ for(i =0 ; i< len ; i++)
+ {
+ MYDBG("data[%d]: %x\n", i, buf[i]);
+ }
+}
+
+/*
+ * Sanity-check endpoint 0 of the cached DSDA device (g_dsda_dev) by
+ * issuing a standard GET_DESCRIPTOR(DEVICE) control transfer and dumping
+ * whatever comes back.  Pure debug hook, triggered from the
+ * /proc/DSDA_TMP_ENTRY write handler.
+ *
+ * NOTE(review): the transfer buffer lives on the stack; usb_control_msg()
+ * buffers are expected to be DMA-able (kmalloc'd) — confirm this is safe
+ * on the target platform.
+ * NOTE(review): g_dsda_dev is used without a NULL check — presumably a
+ * device has always been enumerated before this debug path runs; verify.
+ */
+void test_dsda_device_ep0(void)
+{
+
+ int ret;
+ char data_buf[256];
+ /* request up to 64 bytes of the device descriptor */
+ ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
+ USB_REQ_GET_DESCRIPTOR,
+ USB_DIR_IN,
+ USB_DT_DEVICE << 8,
+ 0,
+ data_buf,
+ 64,
+ USB_CTRL_GET_TIMEOUT);
+
+
+
+ if (ret < 0) {
+ MYDBG("test ep fail, ret : %d\n", ret);
+ }
+ else
+ {
+ MYDBG("test ep0 ok, ret : %d\n", ret);
+ dump_data(data_buf, ret);
+ }
+
+}
+
+void release_usb11_wakelock(void);
+/*
+ * Write handler for /proc/DSDA_TMP_ENTRY: a debug hook dispatching on the
+ * first byte written ('0' = probe the DSDA device's EP0 with a
+ * GET_DESCRIPTOR, '1' = release the usb11 wakelock).
+ *
+ * Returns the number of bytes consumed (all of them), or -EFAULT when the
+ * user buffer cannot be copied.
+ */
+static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
+{
+ char cmd[64];
+ size_t len;
+
+ /* Nothing to do for an empty write; also avoids reading an
+ * uninitialized cmd[0] below. */
+ if (count == 0)
+ {
+ return 0;
+ }
+
+ /* Clamp the copy to the local buffer size: copying 'count' bytes
+ * unconditionally overflows the stack for writes larger than 64
+ * bytes.  Only cmd[0] is inspected, so truncation loses nothing. */
+ len = (count < sizeof(cmd)) ? count : sizeof(cmd);
+
+ if (copy_from_user(cmd, user_buffer, len) != 0)
+ {
+ return -EFAULT;
+ }
+
+ /* apply action here */
+ if(cmd[0] == '0')
+ {
+ MYDBG("");
+ test_dsda_device_ep0();
+ }
+ if(cmd[0] == '1')
+ {
+ MYDBG("");
+ release_usb11_wakelock();
+ }
+
+ MYDBG("");
+
+ return count;
+}
+
+struct file_operations dsda_tmp_proc_fops = {
+ .write = dsda_tmp_proc_entry
+};
+
+
+/*
+ * Create the /proc/DSDA_TMP_ENTRY debug node (mode 0660, root of procfs)
+ * whose write handler is dsda_tmp_proc_entry().  Logs success or failure;
+ * the driver continues either way since this is a debug-only facility.
+ */
+void create_dsda_tmp_entry(void)
+{
+ struct proc_dir_entry *prEntry;
+
+ MYDBG("");
+
+ prEntry = proc_create("DSDA_TMP_ENTRY", 0660, 0, &dsda_tmp_proc_fops);
+ if (prEntry)
+ {
+ MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
+ }
+ else
+ {
+ MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
+ }
+}
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+extern int usbif_u3h_send_event(char* event) ;
+#include "otg_whitelist.h"
+#endif
+
+
static inline int hub_is_superspeed(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
+static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
+#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
+#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
+
+
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeed(hub->hdev))
static int usb_device_supports_lpm(struct usb_device *udev)
{
+ /* Some devices have trouble with LPM */
+ if (udev->quirks & USB_QUIRK_NO_LPM)
+ return 0;
+
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
+ MYDBG("");
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
unsigned delay;
/* Continue a partial initialization */
- if (type == HUB_INIT2)
- goto init2;
- if (type == HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ device_lock(hub->intfdev);
+
+ /* Was the hub disconnected while we were waiting? */
+ if (hub->disconnected) {
+ device_unlock(hub->intfdev);
+ kref_put(&hub->kref, hub_release);
+ return;
+ }
+ if (type == HUB_INIT2)
+ goto init2;
+
goto init3;
+ }
+ kref_get(&hub->kref);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
schedule_delayed_work(&hub->init_work,
msecs_to_jiffies(delay));
+ device_unlock(hub->intfdev);
return; /* Continues at init3: below */
} else {
msleep(delay);
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+ if (type == HUB_INIT2 || type == HUB_INIT3)
+ device_unlock(hub->intfdev);
+
+ kref_put(&hub->kref, hub_release);
}
/* Implement the continuations for the delays above */
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
- } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
+ } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { // bus powered
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
- if (remaining < hdev->maxchild * unit_load)
+ if (remaining < hdev->maxchild * unit_load){
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
+ }
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
struct usb_device *hdev;
struct usb_hub *hub;
+
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
+#endif
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("HUB_NOT_SUPPORTED");
+#endif
return -ENODEV;
}
#endif
struct usb_device *udev = *pdev;
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
+ struct timeval tv_begin, tv_end;
+ struct timeval tv_before, tv_after;
+ do_gettimeofday(&tv_begin);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ int is_icusb_rh;
+#endif
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ is_icusb_rh = is_musbfsh_rh(udev->parent);
+#endif
+
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+
+ do_gettimeofday(&tv_before);
usb_disable_device(udev, 0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port_dev->did_runtime_put = false;
}
+ do_gettimeofday(&tv_before);
usb_remove_ep_devs(&udev->ep0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
+ do_gettimeofday(&tv_before);
device_del(&udev->dev);
+ do_gettimeofday(&tv_after);
+ MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
hub_free_dev(udev);
put_device(&udev->dev);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ if (is_icusb_rh)
+ {
+ set_icusb_sts_disconnect_done();
+ MYDBG("ICUSB Disconnect\n");
+ }
+#endif
+ do_gettimeofday(&tv_end);
+ MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (udev->parent){ // we don't have to check ourself (roothub)
+ if (!is_targeted(udev)) {
+ usbif_u3h_send_event("DEV_NOT_SUPPORTED");
+ err = -ENOTSUPP;
+ }
+ }
+#endif
+
if (err < 0)
return err;
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
+ MYDBG("udev :%p\n", udev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+ if(is_musbfsh_rh(udev->parent)){
+ MYDBG("\n");
+ /*find out struct *usb_hub and hook it */
+ usb11_hub = usb_hub_to_struct_hub(udev->parent);
+ }
+#endif
+#endif
}
/* Tell the runtime-PM framework the device is active */
msleep(delay);
/* read and decode port status */
+ MYDBG("");
ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ MYDBG("");
if (ret < 0)
return ret;
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
+ MYDBG("");
if (status == -ENODEV) {
+ MYDBG("");
; /* The hub is gone */
} else if (status) {
+ MYDBG("");
dev_err(hub->intfdev,
"cannot %sreset port %d (err = %d)\n",
warm ? "warm " : "", port1, status);
} else {
+ MYDBG("");
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
- if (status && status != -ENOTCONN && status != -ENODEV)
+ if (status && status != -ENOTCONN)
+ {
+ MYDBG("");
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
+ }
}
+ MYDBG("");
/* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+ MYDBG("");
hub_port_finish_reset(hub, port1, udev, &status);
+ MYDBG("");
if (!hub_is_superspeed(hub->hdev))
goto done;
warm = true;
}
}
+ MYDBG("");
dev_dbg (hub->intfdev,
"port %d not enabled, trying %sreset again...\n",
port1, warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
+ MYDBG("");
+
dev_err (hub->intfdev,
"Cannot enable port %i. Maybe the USB cable is bad?\n",
done:
if (!hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
up_read(&ehci_cf_port_reset_rwsem);
+ }
+
+ MYDBG("");
return status;
}
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
+ {
+ MYDBG("");
goto err_wakeup;
+ }
}
}
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
+#if 0 /* behavior for kernel 3.10 */
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
- else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
- else {
+ } else {
really_suspend = false;
status = 0;
}
+#else /*roll back behavior to kernel 3.4 */
+ }else{
+ MYDBG("");
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
+ }
+#endif
+
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
+ MYDBG("");
/* Try to enable USB3 LPM and LTM again */
usb_unlocked_enable_lpm(udev);
*/
if (status == 0) {
devstatus = 0;
+ MYDBG("\n");
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ MYDBG("%d\n", status);
if (status >= 0)
status = (status > 0 ? 0 : -ENODEV);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
const char *speed;
int devnum = udev->devnum;
+ dump_stack();
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ MYDBG("");
retval = hub_port_reset(hub, port1, udev, delay, false);
+ MYDBG("");
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
default:
goto fail;
}
+ MYDBG("");
if (udev->speed == USB_SPEED_WIRELESS)
speed = "variable speed Wireless";
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ MYDBG("");
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
}
if (r == 0)
break;
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (buf->bMaxPacketSize0 == 0) {
+ usbif_u3h_send_event("DEV_CONN_TMOUT");
+ }
+#endif
+
}
udev->descriptor.bMaxPacketSize0 =
buf->bMaxPacketSize0;
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
goto fail;
}
+ usb_detect_quirks(udev);
+
if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(udev);
if (!retval) {
remaining -= delta;
}
if (remaining < 0) {
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub->intfdev, "%dmA over power budget!\n",
- remaining);
remaining = 0;
int status, i;
unsigned unit_load;
+ MYDBG("");
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
port1, portstatus, portchange, portspeed(hub, portstatus));
}
/* reset (non-USB 3.0 devices) and get descriptor */
+ MYDBG("");
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
+ {
+ MYDBG("");
goto loop;
+ }
+ MYDBG("");
- usb_detect_quirks(udev);
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
hub->ports[port1 - 1]->child = NULL;
spin_unlock_irq(&device_state_lock);
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ g_dsda_dev = udev;
+ MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
+#endif
}
if (status)
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
}
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
dev_dbg (hub_dev, "resetting for error %d\n",
hub->error);
+ MYDBG("");
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg (hub_dev,
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
.supports_autosuspend = 1,
};
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head* list);
+#endif
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
return -1;
}
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
+#endif
+
khubd_task = kthread_run(hub_thread, NULL, "khubd");
if (!IS_ERR(khubd_task))
return 0;
int i, ret = 0;
int port1 = udev->portnum;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor)) {
dev_info(&udev->dev, "device firmware changed\n");
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
return 0;
-
+
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
- if (test_opt(sb, ERRORS_PANIC))
+ if (test_opt(sb, ERRORS_PANIC)) {
+ if (EXT4_SB(sb)->s_journal &&
+ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+ return;
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
+ }
}
void __ext4_error(struct super_block *sb, const char *function,
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
}
- if (test_opt(sb, ERRORS_PANIC))
+ if (test_opt(sb, ERRORS_PANIC)) {
+ if (EXT4_SB(sb)->s_journal &&
+ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+ return;
panic("EXT4-fs panic from previous error\n");
+ }
}
void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
+ set_freezable();
cont_thread:
while (true) {
schedule_timeout_interruptible(next_wakeup - cur);
- if (kthread_should_stop()) {
+ if (kthread_freezable_should_stop(NULL)) {
ext4_clear_request_list();
goto exit_thread;
}
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
- freezable_schedule();
+ freezable_schedule_unsafe();
return 0;
}
EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
nfsi->attrtimeo_timestamp = now;
}
}
- invalid &= ~NFS_INO_INVALID_ATTR;
+
+ /* Don't declare attrcache up to date if there were no attrs! */
+ if (fattr->valid != 0)
+ invalid &= ~NFS_INO_INVALID_ATTR;
+
/* Don't invalidate the data if we were to blame */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
#define IP6_MF 0x0001
+#define IP6_REPLY_MARK(net, mark) \
+ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
#include <net/sock.h>
/* sysctls */
extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct icmp6hdr *thdr, int len);
+
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+ struct sock *sk, struct flowi6 *fl6);
+
extern int ip6_ra_control(struct sock *sk, int sel);
extern int ipv6_parse_hopopts(struct sk_buff *skb);
u32 user;
const struct in6_addr *src;
const struct in6_addr *dst;
+ int iif;
u8 ecn;
};
sk_no_check : 2,
sk_userlocks : 4,
sk_protocol : 8,
+ #define SK_PROTOCOL_MAX U8_MAX
sk_type : 16;
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};
+ #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
unsigned int limit)
{
if (sk_rcvqueues_full(sk, skb, limit))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_ERR "[mtk_net][sock]sk_add_backlog->sk_rcvqueues_full sk->sk_rcvbuf:%d,sk->sk_sndbuf:%d ",sk->sk_rcvbuf,sk->sk_sndbuf);
+ #endif
return -ENOBUFS;
+ }
/*
* If the skb was allocated from pfmemalloc reserves, only
#include <trace/events/sock.h>
+#include <net/af_unix.h>
+
+
#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
+#include <linux/xlog.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
-__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = (SK_RMEM_MAX*8);
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
}
}
- #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
-
static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
if (sk->sk_flags & flags) {
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
kuid_t sock_i_uid(struct sock *sk)
{
kuid_t uid;
-
+
+ /*mtk_net: fix kernel bug*/
+ if (!sk) {
+ pr_info("sk == NULL for sock_i_uid\n");
+ return GLOBAL_ROOT_UID;
+ }
+
read_lock_bh(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
read_unlock_bh(&sk->sk_callback_lock);
}
+/* Debug function */
+
+static int sock_dump_info(struct sock *sk)
+{
+ //dump receiver queue 128 bytes
+ //struct sk_buff *skb;
+ //char skbmsg[128];
+ //dump receiver queue 128 bytes end
+
+ if(sk->sk_family == AF_UNIX)
+ {
+ struct unix_sock *u = unix_sk(sk);
+ struct sock *other = NULL;
+ if( (u->path.dentry !=NULL)&&(u->path.dentry->d_iname!=NULL))
+ //if( (u->dentry !=NULL)&&(u->dentry->d_iname!=NULL))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: socket-Name:%s \n",u->path.dentry->d_iname);
+ #endif
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg:socket Name (NULL)\n" );
+ #endif
+ }
+
+ if(sk->sk_socket && SOCK_INODE(sk->sk_socket))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg:socket Inode[%lu]\n" ,SOCK_INODE(sk->sk_socket)->i_ino);
+ #endif
+ }
+
+ other = unix_sk(sk)->peer ;
+ if (!other)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg:peer is (NULL) \n");
+ #endif
+ } else{
+
+ if ((((struct unix_sock *)other)->path.dentry != NULL)&&(((struct unix_sock *)other)->path.dentry->d_iname != NULL))
+ //if ((((struct unix_sock *)other)->dentry != NULL)&&(((struct unix_sock *)other)->dentry->d_iname != NULL))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name:%s \n",((struct unix_sock *)other)->path.dentry->d_iname);
+ #endif
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Name (NULL) \n");
+ #endif
+ }
+
+ if(other->sk_socket && SOCK_INODE(other->sk_socket))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Inode [%lu] \n", SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: Peer Recieve Queue len:%d \n",other->sk_receive_queue.qlen);
+ #endif
+ //dump receiver queue 128 bytes
+ /* if ((skb = skb_peek_tail(&other->sk_receive_queue)) == NULL) {
+
+ printk(KERN_INFO "sockdbg: Peer Recieve Queue is null (warning) \n");
+ }else{
+ int i =0 ,len=0;
+ if((skb->len !=0) && (skb->data != NULL)){
+
+ if(skb->len >= 127){
+ len = 127 ;
+ }else
+ {
+ len = skb->len ;
+ }
+ for (i=0;i<len;i++)
+ sprintf(skbmsg+i, "%x", skb->data[i]);
+
+ skbmsg[len]= '\0' ;
+
+ printk(KERN_INFO "sockdbg: Peer Recieve Queue dump(%d bytes):%s\n", len, skbmsg);
+
+
+ }else{
+ printk(KERN_INFO "sockdbg: Peer Recieve skb error \n");
+ }*/
+ //dump receiver queue 128 bytes end
+
+ //}
+ //dump receiver queue 128 bytes end
+
+ }
+ }
+
+ return 0 ;
+
+
+}
+
+
+
/*
* Generic send/receive buffer handlers
*/
goto failure;
if (signal_pending(current))
goto interrupted;
+
+ sock_dump_info(sk);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem, timeo =%ld, wmem =%d, snd buf =%d \n",
+ timeo, atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf);
+ #endif
timeo = sock_wait_for_wmem(sk, timeo);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][sock]sockdbg: wait_for_wmem done, header_len=0x%lx, data_len=0x%lx,timeo =%ld \n",
+ header_len, data_len ,timeo);
+ #endif
}
skb_set_owner_w(skb, sk);
#include <linux/mroute.h>
#endif
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
+ if (protocol < 0 || protocol >= IPPROTO_MAX)
+ return -EINVAL;
+
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
}
err = -EPERM;
- if (sock->type == SOCK_RAW && !kern &&
- !ns_capable(net->user_ns, CAP_NET_RAW))
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
+ case SIOCKILLADDR:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default:
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
+#include <linux/reciprocal_div.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
u32 mss = tcp_sk(sk)->advmss;
- u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ u32 icwnd = sysctl_tcp_default_init_rwnd;
int rcvmem;
/* Limit to 10 segments if mss <= 1460,
* or 14600/mss segments, with a minimum of two segments.
*/
if (mss > 1460)
- icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < mss)
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk1 = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
bool new_recovery = false;
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
+ if (icsk->icsk_MMSRB == 1)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n", tp->snd_cwnd, tp->snd_cwnd_cnt);
+ #endif
+ if (tp->mss_cache != 0)
+ tp->snd_cwnd = (tp->rcv_wnd / tp->mss_cache);
+ else
+ {
+ tp->snd_cwnd = (tp->rcv_wnd / tp->advmss);
+ }
+
+ if (tp->snd_ssthresh > 16)
+ {
+ tp->snd_cwnd = tp->snd_ssthresh / 2;//set snd_cwnd is half of default snd_ssthresh
+ }
+ else
+ {
+ tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
+ }
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
+ #endif
+ icsk1->icsk_MMSRB = 0;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
+ #endif
+ }
+ else
+ {
tp->snd_cwnd = 1;
+ }
+
+ //tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
return true;
}
return false;
return false;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rto = delta;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
*/
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
}
}
static u32 challenge_timestamp;
static unsigned int challenge_count;
u32 now = jiffies / HZ;
+ u32 count;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ ACCESS_ONCE(challenge_count) = half +
+ reciprocal_divide(prandom_u32(),
+ sysctl_tcp_challenge_ack_limit);
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = ACCESS_ONCE(challenge_count);
+ if (count > 0) {
+ ACCESS_ONCE(challenge_count) = count - 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
discard:
__kfree_skb(skb);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tp->copied_seq = tp->rcv_nxt;
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
-
+ printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
+ remaining, sysctl_tcp_rto_max);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
}
md5sig = rcu_dereference_protected(tp->md5sig_info,
- sock_owned_by_user(sk));
+ sock_owned_by_user(sk) ||
+ lockdep_is_held(&sk->sk_lock.slock));
if (!md5sig) {
md5sig = kmalloc(sizeof(*md5sig), gfp);
if (!md5sig)
* because it's been added to the accept queue directly.
*/
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
/* Add the child socket directly into the accept queue */
inet_csk_reqsk_queue_add(sk, req, child);
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
return true;
}
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_init_sock(sk);
+ icsk->icsk_MMSRB = 0;
icsk->icsk_af_ops = &ipv4_specific;
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
+void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+ struct inet_connection_sock *icsk = NULL;//inet_csk(sk);
+
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if(sk->sk_socket){
+ if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ else
+ printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else{
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+ // update sk time out value
+ icsk = inet_csk(sk);
+ printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
+
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
+ icsk->icsk_rto = sysctl_tcp_rto_min * 30;
+ icsk->icsk_MMSRB = 1;
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ spin_lock_bh(lock);
+ sock_put(sk);
+
+ }
+ spin_unlock_bh(lock);
+ }
+
+}
+
+
+/*
+ * tcp_v4_nuke_addr_by_uid - destroy all sockets of a specific uid
+ */
+void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if(sk->sk_socket){
+ if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ else
+ printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else{
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = uid_e.errNum;
+ printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+}
+
+
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
#include <net/udp.h>
#include <net/udplite.h>
#include <net/tcp.h>
+#include <net/ping.h>
#include <net/protocol.h>
#include <net/inet_common.h>
#include <net/route.h>
#include <asm/uaccess.h>
#include <linux/mroute6.h>
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
+
MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (sock->type != SOCK_RAW &&
sock->type != SOCK_DGRAM &&
!inet_ehash_secret)
build_ehash_secret();
+ if (protocol < 0 || protocol >= IPPROTO_MAX)
+ return -EINVAL;
+
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
}
err = -EPERM;
- if (sock->type == SOCK_RAW && !kern &&
- !ns_capable(net->user_ns, CAP_NET_RAW))
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
}
EXPORT_SYMBOL(inet6_getname);
+int inet6_killaddr_ioctl(struct net *net, void __user *arg) {
+ struct in6_ifreq ireq;
+ struct sockaddr_in6 sin6;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+ return -EFAULT;
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = ireq.ifr6_addr;
+ return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
return addrconf_del_ifaddr(net, (void __user *) arg);
case SIOCSIFDSTADDR:
return addrconf_set_dstaddr(net, (void __user *) arg);
+ case SIOCKILLADDR:
+ return inet6_killaddr_ioctl(net, (void __user *) arg);
default:
if (!sk->sk_prot->ioctl)
return -ENOIOCTLCMD;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
+ fl6.flowi6_uid = sock_i_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
final_p = fl6_update_dst(&fl6, np->opt, &final);
if (err)
goto out_unregister_udplite_proto;
+ err = proto_register(&pingv6_prot, 1);
+ if (err)
+ goto out_unregister_ping_proto;
/* We MUST register RAW sockets before we create the ICMP6,
* IGMP6, or NDISC control sockets.
if (err)
goto ipv6_packet_fail;
+ err = pingv6_init();
+ if (err)
+ goto pingv6_fail;
+
#ifdef CONFIG_SYSCTL
err = ipv6_sysctl_register();
if (err)
sysctl_fail:
ipv6_packet_cleanup();
#endif
+pingv6_fail:
+ pingv6_exit();
ipv6_packet_fail:
tcpv6_exit();
tcpv6_fail:
rtnl_unregister_all(PF_INET6);
out_sock_register_fail:
rawv6_exit();
+out_unregister_ping_proto:
+ proto_unregister(&pingv6_prot);
out_unregister_raw_proto:
proto_unregister(&rawv6_prot);
out_unregister_udplite_proto:
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/freezer.h>
+
+
+#include <linux/uio.h>
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/rtc.h>
+#include <asm/kmap_types.h>
+#include <linux/device.h>
+
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
+
+//for aee interface start
+#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
+static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
+#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
+#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
+
+static volatile unsigned int unix_sock_track_stop_flag = 0;
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) (unix_sk(sk)->peer)
+
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
+ #endif
return;
}
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
-#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ #ifdef UNIX_REFCNT_DEBUG
+ printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
-#endif
+ #endif
}
static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
put_pid(old_pid);
out:
+
return err;
}
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
+ struct path path;
+
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = unix_mknod(sun_path, mode, &path);
out_up:
mutex_unlock(&u->readlock);
out:
+
return err;
}
int err;
if (addr->sa_family != AF_UNSPEC) {
+
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
+
+#ifdef CONFIG_MTK_NET_LOGGING
+ if((SOCK_INODE(sock)!= NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]:connect [%s] other[%lu]\n",SOCK_INODE(sock)->i_ino,sunaddr->sun_path,SOCK_INODE(other->sk_socket)->i_ino);
+ }
+#endif
+
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
+
return err;
}
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
+
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if((SOCK_INODE(sock)!= NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu ]: connect [%s] other[%lu] \n",SOCK_INODE(sock)->i_ino,sunaddr->sun_path,SOCK_INODE(other->sk_socket)->i_ino);
+ }
+ #endif
+
other->sk_data_ready(other, 0);
sock_put(other);
+
return 0;
out_unlock:
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
+
return err;
}
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
-
+
skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
if (!skb) {
/* This means receive shutdown. */
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
+
return 0;
out:
+
return err;
}
int max_level;
int data_len = 0;
int sk_locked;
+
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
sock_put(other);
if (!sk_locked)
+		unix_state_lock(sk);
+
+		err = 0;
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
goto out_unlock;
}
+	/* other == sk && unix_peer(other) != sk if
+	 * - unix_peer(sk) == NULL, destination address bound to sk
+	 * - unix_peer(sk) == sk by time of get but disconnected before lock
+	 */
+	if (other != sk &&
+	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
+
return len;
out_unlock:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
+
return err;
}
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
+
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
&err);
+
if (skb == NULL)
goto out_err;
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
+ {
+ if( other->sk_socket )
+ {
+ if(sk->sk_socket)
+ {
+
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg[%lu:%lu]:peer close\n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }
+ else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg[null:%lu]:peer close\n" ,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }
+
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: sendmsg:peer close \n" );
+ #endif
+ }
+
+
goto pipe_err_free;
+ }
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
+
return sent ? : err;
}
if (flags&MSG_OOB)
goto out;
- err = mutex_lock_interruptible(&u->readlock);
- if (unlikely(err)) {
- /* recvmsg() in non blocking mode is supposed to return -EAGAIN
- * sk_rcvtimeo is not honored by mutex_lock_interruptible()
- */
- err = noblock ? -EAGAIN : -ERESTARTSYS;
- goto out;
- }
+ mutex_lock(&u->readlock);
skip = sk_peek_offset(sk, flags);
out_unlock:
mutex_unlock(&u->readlock);
out:
+
return err;
}
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_unlock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
int err = 0;
long timeo;
int skip;
+ struct sock * other = unix_peer(sk);
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
+ {
+ if(sk && sk->sk_socket )
+ {
+ if(other && other->sk_socket ){
+ #ifdef CONFIG_MTK_NET_LOGGING
+
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to peer shutdown \n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]:exit read due to peer shutdown \n" ,SOCK_INODE(sk->sk_socket)->i_ino);
+ #endif
+ }
+ }
+ else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg: exit read due to peer shutdown \n" );
+ #endif
+ }
goto unlock;
-
+ }
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last);
+ if (!timeo)
+ {
+ if(sk && sk->sk_socket )
+ {
+ if(other && other->sk_socket ){
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:%lu]:exit read due to timeout \n" ,SOCK_INODE(sk->sk_socket)->i_ino,SOCK_INODE(other->sk_socket)->i_ino);
+ #endif
+ }else{
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg[%lu:null]:exit read due to timeout \n" ,SOCK_INODE(sk->sk_socket)->i_ino);
+ #endif
+ }
+ }
+ else
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO " [mtk_net][unix]: recvmsg:exit read due to timeout \n" );
+ #endif
+ }
+
+ }
- if (signal_pending(current)
- || mutex_lock_interruptible(&u->readlock)) {
+ if (signal_pending(current)) {
err = sock_intr_errno(timeo);
goto out;
}
+ mutex_lock(&u->readlock);
continue;
unlock:
unix_state_unlock(sk);
mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
+
return copied ? : err;
}
mask |= POLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
+ {
+
return mask;
- }
+ }
+ }
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
+ {
return mask;
+ }
writable = unix_writable(sk);
if (writable) {