VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 96
+ SUBLEVEL = 97
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-Werror-implicit-function-declaration \
-Wno-format-security \
-fno-delete-null-pointer-checks \
- -std=gnu89
+ -w -std=gnu89
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
#include "mtdcore.h"
+#define DYNAMIC_CHANGE_MTD_WRITEABLE
+#ifdef DYNAMIC_CHANGE_MTD_WRITEABLE //wschen 2011-01-05
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+static struct mtd_info *my_mtd = NULL;
+int mtd_writeable_proc_write(struct file *file, const char *buffer, unsigned long count, void *data);
+
+/* Userspace ABI for the partition-resize /proc interface: one entry per
+ * partition to be resized. Layout must match the userspace writer exactly
+ * (two packed 64-bit fields) — see mtd_change_proc_write(). */
+struct mtd_change {
+ uint64_t size;
+ uint64_t offset;
+};
+int mtd_change_proc_write(struct file *file, const char *buffer, unsigned long count, void *data);
+#endif
+
+
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
+#ifdef DYNAMIC_CHANGE_MTD_WRITEABLE //wschen 2011-01-05
+ my_mtd = NULL;
+#endif
+
ret = del_mtd_device(&slave->mtd);
if (ret < 0) {
err = ret;
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
- if (IS_ERR(slave))
+ if (IS_ERR(slave)) {
+ del_mtd_partitions(master);
return PTR_ERR(slave);
+ }
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
cur_offset = slave->offset + slave->mtd.size;
}
-
+#ifdef DYNAMIC_CHANGE_MTD_WRITEABLE //wschen 2011-01-05
+ my_mtd = master;
+#endif
return 0;
}
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+#ifdef CONFIG_MTK_MTD_NAND
+/* Return the absolute flash offset at which this partition begins.
+ * NOTE(review): assumes @mtd really is a partition (PART() cast is
+ * unchecked) — callers must not pass a master device. */
+u64 mtd_partition_start_address(struct mtd_info *mtd)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->offset;
+}
+EXPORT_SYMBOL_GPL(mtd_partition_start_address);
+#endif
+
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
+
+#ifdef DYNAMIC_CHANGE_MTD_WRITEABLE //wschen 2011-01-05
+/*
+ * /proc write handler: set MTD_WRITEABLE on every partition of the
+ * registered master device (my_mtd).
+ *
+ * The "command" is exactly 3 bytes from userspace, all of which must be
+ * zero; anything else is rejected. NOTE(review): -EFAULT is returned for
+ * malformed input where -EINVAL would be conventional — kept as-is.
+ */
+int mtd_writeable_proc_write(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char buf[3];
+
+ if (count != 3) {
+ return -EFAULT;
+ }
+
+ if (copy_from_user(buf, buffer, 3)) {
+ return -EFAULT;
+ }
+
+ /* Magic payload: three zero bytes act as the "make writeable" command. */
+ if ((buf[0] != 0) || (buf[1] != 0) || (buf[2] != 0)) {
+ return -EFAULT;
+ }
+
+ if (my_mtd) {
+
+ struct mtd_part *slave, *next;
+
+ /* NOTE(review): list walked without mtd_partitions_mutex held —
+ * races against concurrent add/del of partitions; confirm this is
+ * acceptable on this platform. */
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if (slave->master == my_mtd) {
+ slave->mtd.flags |= MTD_WRITEABLE;
+ }
+ }
+
+ return count;
+}
+
+#define MTD_CHANGE_NUM 4
+/*
+ * /proc write handler: overwrite size/offset of the well-known partitions
+ * ("expdb", "system", "cache", "userdata") of the registered master.
+ *
+ * Userspace passes an array of struct mtd_change: either 3 entries
+ * (system/cache/userdata) or 4 entries (expdb first, then the other
+ * three). Any other payload length is rejected.
+ */
+int mtd_change_proc_write(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ struct mtd_change mtd_change[MTD_CHANGE_NUM];
+ int write_3 = 0;
+
+ /* 3-entry layout omits "expdb"; 4-entry layout includes it. */
+ if (count == (sizeof(struct mtd_change) * (MTD_CHANGE_NUM - 1))) {
+ write_3 = 1;
+ } else if (count != (sizeof(struct mtd_change) * MTD_CHANGE_NUM)) {
+ return -EFAULT;
+ }
+
+ if (copy_from_user(mtd_change, buffer, count)) {
+ return -EFAULT;
+ }
+
+ if (my_mtd) {
+ struct mtd_part *slave, *next;
+
+ /* NOTE(review): strncmp is bounded by strlen(slave->mtd.name), so a
+ * partition whose name is a strict prefix of a target (e.g. "sys")
+ * would also match — confirm partition names are exact here. List
+ * is walked without mtd_partitions_mutex, as in the writeable
+ * handler above. */
+ list_for_each_entry_safe(slave, next, &mtd_partitions, list)
+ if (slave->master == my_mtd) {
+ if (write_3 == 1) {
+ if (strncmp(slave->mtd.name, "system", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[0].size;
+ slave->offset = mtd_change[0].offset;
+ } else if (strncmp(slave->mtd.name, "cache", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[1].size;
+ slave->offset = mtd_change[1].offset;
+ } else if (strncmp(slave->mtd.name, "userdata", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[2].size;
+ slave->offset = mtd_change[2].offset;
+ }
+ } else {
+ //4 arguments
+ if (strncmp(slave->mtd.name, "expdb", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[0].size;
+ slave->offset = mtd_change[0].offset;
+ } else if (strncmp(slave->mtd.name, "system", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[1].size;
+ slave->offset = mtd_change[1].offset;
+ } else if (strncmp(slave->mtd.name, "cache", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[2].size;
+ slave->offset = mtd_change[2].offset;
+ } else if (strncmp(slave->mtd.name, "userdata", strlen(slave->mtd.name)) == 0) {
+ slave->mtd.size = mtd_change[3].size;
+ slave->offset = mtd_change[3].offset;
+ }
+ }
+ }
+ }
+
+ return count;
+}
+#endif
#include "xhci.h"
+#ifdef CONFIG_MTK_XHCI
+#include <asm/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/xhci/xhci-mtk-scheduler.h>
+#include <linux/xhci/xhci-mtk-power.h>
+#include <linux/xhci/xhci-mtk.h>
+
+#ifdef CONFIG_USBIF_COMPLIANCE
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/kobject.h>
+#include <linux/miscdevice.h>
+
+/* Misc char device used only as a uevent anchor for USB-IF compliance
+ * events (see usbif_u3h_send_event); it exposes no file operations. */
+static struct miscdevice mu3h_uevent_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usbif_u3h_uevent",
+ .fops = NULL,
+};
+#endif
+#endif
+
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+#ifdef CONFIG_USBIF_COMPLIANCE
+/*
+ * Broadcast a USB-IF compliance event to userspace as a KOBJ_CHANGE
+ * uevent with environment string "USBIF_EVENT=<event>", anchored on the
+ * mu3h misc device's kobject. Returns the kobject_uevent_env() result
+ * (negative on failure).
+ *
+ * NOTE(review): kobject_get_path() allocates; the returned string is
+ * never freed here — confirm whether this debug path should kfree it.
+ */
+int usbif_u3h_send_event(char* event)
+{
+ char udev_event[128];
+ char *envp[] = {udev_event, NULL };
+ int ret ;
+
+ snprintf(udev_event, 128, "USBIF_EVENT=%s",event);
+ printk("usbif_u3h_send_event - sending event - %s in %s\n", udev_event, kobject_get_path(&mu3h_uevent_device.this_device->kobj, GFP_KERNEL));
+ ret = kobject_uevent_env(&mu3h_uevent_device.this_device->kobj, KOBJ_CHANGE, envp);
+ if (ret < 0)
+ printk("usbif_u3h_send_event sending failed with ret = %d, \n", ret);
+
+ return ret;
+}
+#endif
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
} else {
xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
}
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg(xhci, "Finished xhci_init\n");
xhci_halt(xhci);
return -ENODEV;
}
+
xhci->shared_hcd->state = HC_STATE_RUNNING;
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
xhci_ring_cmd_db(xhci);
xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+
return 0;
}
xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
__func__);
}
-
+#ifndef CONFIG_MTK_XHCI
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
-
+#endif
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
u32 drop_flag;
u32 new_add_flags, new_drop_flags, new_slot_info;
int ret;
+#ifdef CONFIG_MTK_XHCI
+ struct sch_ep *sch_ep = NULL;
+ int isTT;
+ int ep_type = 0;
+#endif
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0)
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+#ifdef CONFIG_MTK_XHCI
+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx);
+ if((slot_ctx->tt_info & 0xff) > 0){
+ isTT = 1;
+ }
+ else{
+ isTT = 0;
+ }
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ ep_type = USB_EP_INT;
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc)){
+ ep_type = USB_EP_ISOC;
+ }
+ else if(usb_endpoint_xfer_bulk(&ep->desc)){
+ ep_type = USB_EP_BULK;
+ }
+ sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, usb_endpoint_dir_in(&ep->desc)
+ , isTT, ep_type, (mtk_u32 *)ep);
+ if(sch_ep != NULL){
+ kfree(sch_ep);
+ }
+ else{
+ xhci_warn(xhci, "[MTK]Doesn't find ep_sch instance when removing endpoint\n");
+ }
+#endif
+
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags,
(unsigned int) new_slot_info);
+
+ #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_ep_count_dec();
+ #endif
+
return 0;
}
u32 new_add_flags, new_drop_flags, new_slot_info;
struct xhci_virt_device *virt_dev;
int ret = 0;
+#ifdef CONFIG_MTK_XHCI
+ struct xhci_ep_ctx *in_ep_ctx;
+ struct sch_ep *sch_ep;
+ int isTT;
+ int ep_type = 0;
+ int maxp = 0;
+ int burst = 0;
+ int mult = 0;
+ int interval = 0;
+#endif
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0) {
return -ENOMEM;
}
+#ifdef CONFIG_MTK_XHCI
+ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+
+ if((slot_ctx->tt_info & 0xff) > 0){
+ isTT = 1;
+ }
+ else{
+ isTT = 0;
+ }
+ if(usb_endpoint_xfer_int(&ep->desc)){
+ ep_type = USB_EP_INT;
+ }
+ else if(usb_endpoint_xfer_isoc(&ep->desc)){
+ ep_type = USB_EP_ISOC;
+ }
+ else if(usb_endpoint_xfer_bulk(&ep->desc)){
+ ep_type = USB_EP_BULK;
+ }
+ if(udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
+ || udev->speed == USB_SPEED_LOW){
+ maxp = ep->desc.wMaxPacketSize & 0x7FF;
+ burst = ep->desc.wMaxPacketSize >> 11;
+ mult = 0;
+ }
+ else if(udev->speed == USB_SPEED_SUPER){
+ maxp = ep->desc.wMaxPacketSize & 0x7FF;
+ burst = ep->ss_ep_comp.bMaxBurst;
+ mult = ep->ss_ep_comp.bmAttributes & 0x3;
+ }
+ interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff));
+ sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
+ if(mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
+ isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep
+ , (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS){
+ xhci_err(xhci, "[MTK] not enough bandwidth\n");
+ return -ENOSPC;
+ }
+#endif
+
ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags,
(unsigned int) new_slot_info);
+
+ #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_ep_count_inc();
+ #endif
+
return 0;
}
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
+#ifndef CONFIG_USB_DEFAULT_PERSIST
struct device *dev = hcd->self.controller;
+#endif
unsigned long flags;
u32 state;
int i, ret;
return 0;
}
+#ifdef CONFIG_MTK_XHCI
+ retval = mtk_xhci_ip_init(hcd, xhci);
+ if(retval)
+ goto error;
+#endif
+
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
if (retval)
goto error;
xhci_dbg(xhci, "Called HCD init\n");
+
+ printk("%s(%d): do mtk_xhci_set\n", __func__, __LINE__);
+
return 0;
error:
kfree(xhci);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
+#ifdef CONFIG_USBIF_COMPLIANCE
+#ifndef CONFIG_USB_MTK_DUALMODE
+/*
+ * Register the xHCI PCI and platform drivers (plus MTK attributes and
+ * wakelock when CONFIG_MTK_XHCI is set), then compile-time-check the
+ * layout of the hardware-facing structures. Returns 0 on success or the
+ * first failing registration's error code, unwinding prior steps.
+ */
+static int xhci_hcd_driver_init(void)
+{
+ int retval;
+
+ retval = xhci_register_pci();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem registering PCI driver.");
+ return retval;
+ }
+
+ /* NOTE(review): here mtk_xhci_ip_init() is called with no arguments,
+ * while another call site passes (hcd, xhci) — confirm the intended
+ * prototype under this config. */
+ #ifdef CONFIG_MTK_XHCI
+ mtk_xhci_ip_init();
+ #endif
+
+ retval = xhci_register_plat();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem registering platform driver.");
+ goto unreg_pci;
+ }
+
+ #ifdef CONFIG_MTK_XHCI
+ retval = xhci_attrs_init();
+ if(retval < 0){
+ printk(KERN_DEBUG "Problem creating xhci attributes.");
+ goto unreg_plat;
+ }
+
+ mtk_xhci_wakelock_init();
+ #endif
+
+ /*
+ * Check the compiler generated sizes of structures that must be laid
+ * out in specific ways for hardware access.
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+ /* xhci_device_control has eight fields, and also
+ * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+ BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ return 0;
+
+/* Error unwind: undo registrations in reverse order. */
+#ifdef CONFIG_MTK_XHCI
+unreg_plat:
+ xhci_unregister_plat();
+#endif
+unreg_pci:
+ xhci_unregister_pci();
+ return retval;
+}
+
+/* Tear down everything xhci_hcd_driver_init() registered.
+ * NOTE(review): xhci_attrs_exit() is called unconditionally although
+ * xhci_attrs_init() only runs under CONFIG_MTK_XHCI — confirm it is safe
+ * (or compiled out) when that config is unset. */
+static void xhci_hcd_driver_cleanup(void)
+{
+ xhci_unregister_pci();
+ xhci_unregister_plat();
+ xhci_attrs_exit();
+}
+#else
+/* Dual-mode variant: host controller bring-up is done in mt_devs.c; this
+ * only wires up the IDDIG OTG-detect interrupt and the mode switch. */
+static int xhci_hcd_driver_init(void)
+{
+ // init in mt_devs.c
+ mtk_xhci_eint_iddig_init();
+ mtk_xhci_switch_init();
+ //mtk_xhci_wakelock_init();
+ return 0;
+}
+
+/* Dual-mode variant: only the IDDIG interrupt needs undoing (the switch
+ * registered by mtk_xhci_switch_init() is intentionally left in place). */
+static void xhci_hcd_driver_cleanup(void)
+{
+ mtk_xhci_eint_iddig_deinit() ;
+}
+
+#endif
+
+static int mu3h_normal_driver_on = 0 ;
+
+/* seq_file show callback: report whether the mu3h driver is currently
+ * registered (1) or not (0). */
+static int xhci_mu3h_proc_show(struct seq_file *seq, void *v)
+{
+ seq_printf(seq, "xhci_mu3h_proc_show, mu3h is %d (on:1, off:0)\n", mu3h_normal_driver_on);
+ return 0;
+}
+
+/* proc open callback: standard single_open() wrapper around the show fn. */
+static int xhci_mu3h_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xhci_mu3h_proc_show, inode->i_private);
+}
+
+/*
+ * proc write callback: "1" registers the mu3h xHCI driver, "0" unregisters
+ * it; the toggle is guarded by mu3h_normal_driver_on so repeated writes of
+ * the same value are no-ops. Always consumes the full write on success.
+ *
+ * NOTE(review): locals `ret` and `result` are unused; the return value of
+ * xhci_hcd_driver_init() is ignored. Left untouched here.
+ */
+static ssize_t xhci_mu3h_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos)
+{
+ int ret ;
+ char msg[32] ;
+ int result;
+
+ /* Reserve one byte for the NUL terminator added below. */
+ if (length >= sizeof(msg)) {
+ printk( "xhci_mu3h_proc_write length error, the error len is %d\n", (unsigned int)length);
+ return -EINVAL;
+ }
+ if (copy_from_user(msg, buf, length))
+ return -EFAULT;
+
+ msg[length] = 0 ;
+
+ printk("xhci_mu3h_proc_write: %s, current driver on/off: %d\n", msg, mu3h_normal_driver_on);
+
+ if ((msg[0] == '1') && (mu3h_normal_driver_on == 0)){
+ xhci_hcd_driver_init() ;
+ mu3h_normal_driver_on = 1 ;
+ printk("registe mu3h driver : m3h xhci driver\n");
+ }else if ((msg[0] == '0') && (mu3h_normal_driver_on == 1)){
+ xhci_hcd_driver_cleanup();
+ mu3h_normal_driver_on = 0 ;
+ printk("unregiste m3h xhci driver.\n");
+ }else{
+ printk("xhci_mu3h_proc_write write faile !\n");
+ }
+ return length;
+}
+
+/* File operations for /proc/mu3h_driver_init: read via seq_file, write
+ * toggles driver registration (see xhci_mu3h_proc_write). */
+static const struct file_operations mu3h_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = xhci_mu3h_proc_open,
+ .write = xhci_mu3h_proc_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+
+};
+
+/*
+ * Module init (USB-IF compliance build): bring the driver up immediately,
+ * then create /proc/mu3h_driver_init so userspace can toggle it, and
+ * register the uevent misc device used by usbif_u3h_send_event().
+ *
+ * NOTE(review): the return values of xhci_hcd_driver_init(),
+ * proc_create() and misc_register() are logged but never propagated —
+ * init always reports success; confirm that is intentional.
+ */
+static int __init xhci_hcd_init(void)
+{
+ struct proc_dir_entry *prEntry;
+
+ printk(KERN_DEBUG "xhci_hcd_init");
+
+ // set xhci up at boot up
+ xhci_hcd_driver_init() ;
+ mtk_xhci_wakelock_init();
+ mu3h_normal_driver_on = 1;
+
+ // USBIF
+ prEntry = proc_create("mu3h_driver_init", 0666, NULL, &mu3h_proc_fops);
+ if (prEntry)
+ {
+ printk("create the mu3h init proc OK!\n") ;
+ }else{
+ printk("[ERROR] create the mu3h init proc FAIL\n") ;
+ }
+
+#ifdef CONFIG_MTK_XHCI
+
+ if (!misc_register(&mu3h_uevent_device)){
+ printk("create the mu3h_uevent_device uevent device OK!\n") ;
+
+ }else{
+ printk("[ERROR] create the mu3h_uevent_device uevent device fail\n") ;
+ }
+
+#endif
+
+ return 0 ;
+
+}
+module_init(xhci_hcd_init);
+
+/* Module exit (USB-IF compliance build): drop the uevent misc device.
+ * NOTE(review): the proc entry and any registered drivers are not removed
+ * here — presumably left to xhci_hcd_driver_cleanup() via the proc toggle;
+ * confirm. */
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_MTK_XHCI
+ misc_deregister(&mu3h_uevent_device);
+#endif
+ printk(KERN_DEBUG "xhci_hcd_cleanup");
+}
+module_exit(xhci_hcd_cleanup);
+
+#else
+#ifndef CONFIG_USB_MTK_DUALMODE
static int __init xhci_hcd_init(void)
{
int retval;
+ if (usb_disabled())
+ return -ENODEV;
+
retval = xhci_register_pci();
if (retval < 0) {
printk(KERN_DEBUG "Problem registering PCI driver.");
printk(KERN_DEBUG "Problem registering platform driver.");
goto unreg_pci;
}
+
+ #ifdef CONFIG_MTK_XHCI
+ retval = xhci_attrs_init();
+ if(retval < 0){
+ printk(KERN_DEBUG "Problem creating xhci attributes.");
+ goto unreg_plat;
+ }
+
+ mtk_xhci_wakelock_init();
+ #endif
+
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
- if (usb_disabled())
- return -ENODEV;
-
return 0;
+
+#ifdef CONFIG_MTK_XHCI
+unreg_plat:
+ xhci_unregister_plat();
+#endif
unreg_pci:
xhci_unregister_pci();
return retval;
{
xhci_unregister_pci();
xhci_unregister_plat();
+ xhci_attrs_exit();
}
module_exit(xhci_hcd_cleanup);
+#else
+/* Module init (dual-mode build): only set up OTG IDDIG detection, the
+ * mode switch and the wakelock; host registration happens elsewhere. */
+static int __init xhci_hcd_init(void)
+{
+ mtk_xhci_eint_iddig_init();
+ mtk_xhci_switch_init();
+ mtk_xhci_wakelock_init();
+ return 0;
+}
+module_init(xhci_hcd_init);
+
+/* Module exit (dual-mode build): intentionally empty — nothing registered
+ * by the matching init is undone here. */
+static void __exit xhci_hcd_cleanup(void)
+{
+}
+module_exit(xhci_hcd_cleanup);
+
+#endif
+#endif
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_K4605 0x14C6
#define HUAWEI_PRODUCT_E173S6 0x1C07
+#define HW_USB_DEVICE_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (vend), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
-/* Samsung products */
+/* SS products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
+#define MEDIATEK_PRODUCT_DC_4COM3 0x00a8
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
.reserved = BIT(1) | BIT(5),
};
+ static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ .sendsetup = BIT(2),
+ .reserved = BIT(0) | BIT(1) | BIT(3),
+ };
+
+ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2) | BIT(3),
+ };
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
- { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* SS GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM3, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
(const struct option_blacklist_info *) id->driver_info))
return -ENODEV;
/*
- * Don't bind network interface on Samsung GT-B3730, it is handled by
+ * Don't bind network interface on SS GT-B3730, it is handled by
* a separate module.
*/
if (dev_desc->idVendor == cpu_to_le16(SAMSUNG_VENDOR_ID) &&
*/
would_dump(bprm, interpreter);
- retval = kernel_read(interpreter, 0, bprm->buf,
- BINPRM_BUF_SIZE);
- if (retval != BINPRM_BUF_SIZE) {
+ /* Get the exec headers */
+ retval = kernel_read(interpreter, 0,
+ (void *)&loc->interp_elf_ex,
+ sizeof(loc->interp_elf_ex));
+ if (retval != sizeof(loc->interp_elf_ex)) {
if (retval >= 0)
retval = -EIO;
goto out_free_dentry;
}
- /* Get the exec headers */
- loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
break;
}
elf_ppnt++;
* Jeremy Fitzhardinge <jeremy@sw.oz.au>
*/
+#ifdef CONFIG_MTK_EXTMEM
+extern bool extmem_in_mspace(struct vm_area_struct *vma);
+extern unsigned long get_virt_from_mspace(unsigned long pa);
+#endif
+
/*
* The purpose of always_dump_vma() is to make sure that special kernel mappings
* that are useful for post-mortem analysis are included in every core dump.
if (arch_vma_name(vma))
return true;
+#ifdef CONFIG_MTK_EXTMEM
+ if (extmem_in_mspace(vma)) {
+ return true;
+ }
+#endif
return false;
}
Elf_Half e_phnum;
elf_addr_t e_shoff;
+ printk(KERN_WARNING "coredump(%d): start\n", current->pid);
+
/*
* We no longer stop all VM operations.
*
if (!dump_seek(cprm->file, dataoff - foffset))
goto end_coredump;
+ printk(KERN_WARNING "coredump(%d): write output program header and notes\n", current->pid);
+
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
unsigned long addr;
end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
+#ifdef CONFIG_MTK_EXTMEM
+ if (extmem_in_mspace(vma)) {
+ void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT);
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE, extmem_va += PAGE_SIZE) {
+ int stop;
+ int dump_write_ret = dump_write(cprm->file, extmem_va, PAGE_SIZE);
+ stop = ((size += PAGE_SIZE) > cprm->limit) || (!dump_write_ret);
+ if (stop) {
+ printk(KERN_WARNING "[EXT_MEM]stop addr:0x%lx, size:%zx, limit:0x%lx, dump_write_ret:%d\n",
+ addr, size, cprm->limit, dump_write_ret);
+ goto end_coredump;
+ }
+ }
+ continue;
+ }
+#endif
+
+ //printk(KERN_WARNING "coredump(%d): write out load vm start:%08lx, end:%08lx\n", current->pid, vma->vm_start, end);
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
int stop;
PAGE_SIZE);
kunmap(page);
page_cache_release(page);
- } else
+ if (stop) {
+ printk(KERN_WARNING "coredump(%d): failed to write core dump\n", current->pid);
+ }
+ } else {
stop = !dump_seek(cprm->file, PAGE_SIZE);
+ if (stop) {
+ printk(KERN_WARNING "coredump(%d): failed to seek core dump\n", current->pid);
+ }
+ }
if (stop)
goto end_coredump;
}
}
+ printk(KERN_WARNING "coredump(%d): write loads\n", current->pid);
+
if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
goto end_coredump;
goto end_coredump;
}
+ printk(KERN_WARNING "coredump(%d): write out completed %lld\n", current->pid, offset);
+
end_coredump:
set_fs(fs);
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/timer.h>
+ #include <linux/version.h>
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
<= (EXT4_GOOD_OLD_INODE_SIZE + \
(einode)->i_extra_isize)) \
+ /*
+ * We use an encoding that preserves the times for extra epoch "00":
+ *
+ * extra msb of adjust for signed
+ * epoch 32-bit 32-bit tv_sec to
+ * bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range
+ * 0 0 1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31
+ * 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19
+ * 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07
+ * 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25
+ * 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16
+ * 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04
+ * 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22
+ * 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10
+ *
+ * Note that previous versions of the kernel on 64-bit systems would
+ * incorrectly use extra epoch bits 1,1 for dates between 1901 and
+ * 1970. e2fsck will correct this, assuming that it is run on the
+ * affected filesystem before 2242.
+ */
+
static inline __le32 ext4_encode_extra_time(struct timespec *time)
{
- return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
- (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
- ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
+ u32 extra = sizeof(time->tv_sec) > 4 ?
+ ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+ return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
}
static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
{
- if (sizeof(time->tv_sec) > 4)
- time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
- << 32;
- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ if (unlikely(sizeof(time->tv_sec) > 4 &&
+ (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
+ /* Handle legacy encoding of pre-1970 dates with epoch
+ * bits 1,1. We assume that by kernel version 4.20,
+ * everyone will have run fsck over the affected
+ * filesystems to correct the problem. (This
+ * backwards compatibility may be removed before this
+ * time, at the discretion of the ext4 developers.)
+ */
+ u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
+ if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
+ extra_bits = 0;
+ time->tv_sec += extra_bits << 32;
+ #else
+ time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+ #endif
+ }
+ time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
}
#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
ext4_group_t i, struct ext4_group_desc *desc);
extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+ unsigned long blkdev_flags);
/* inode.c */
struct buffer_head *ext4_getblk(handle_t *, struct inode *,
struct task_struct *t;
if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
- if (signal->flags & SIGNAL_GROUP_COREDUMP)
- return sig == SIGKILL;
+ if (signal->flags & SIGNAL_GROUP_COREDUMP) {
+ printk(KERN_DEBUG "[%d:%s] is in the middle of dying so skip sig %d\n",p->pid, p->comm, sig);
+ }
+ return 0;
/*
* The process is in the middle of dying, nothing to do.
*/
}
#endif
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
+
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
int group, int from_ancestor_ns)
{
struct sigqueue *q;
int override_rlimit;
int ret = 0, result;
+ unsigned state;
+ state = t->state ? __ffs(t->state) + 1 : 0;
+ printk(KERN_DEBUG "[%d:%s] sig %d to [%d:%s] stat=%c\n",
+ current->pid, current->comm, sig, t->pid, t->comm,
+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
assert_spin_locked(&t->sighand->siglock);
result = TRACE_SIGNAL_IGNORED;
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
- timeout = schedule_timeout_interruptible(timeout);
+ timeout = freezable_schedule_timeout_interruptible(timeout);
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
#endif
- int sigsuspend(sigset_t *set)
+ static int sigsuspend(sigset_t *set)
{
current->saved_sigmask = current->blocked;
set_current_blocked(set);
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
+ /* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+ #if _IOC_SIZEBITS < 14
+ #define COMPR_CODEC_CAPS_OVERFLOW
+ #endif
+
/* TODO:
* - add substream support for multiple devices in case of
* SND_DYNAMIC_MINORS is not used
return retval;
}
+ #ifndef COMPR_CODEC_CAPS_OVERFLOW
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
kfree(caps);
return retval;
}
+ #endif /* !COMPR_CODEC_CAPS_OVERFLOW */
/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > SIZE_MAX / params->buffer.fragment_size)
+ params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
return -EINVAL;
/* now codec parameters */
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
retval = snd_compr_get_caps(stream, arg);
break;
+ #ifndef COMPR_CODEC_CAPS_OVERFLOW
case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
retval = snd_compr_get_codec_caps(stream, arg);
break;
+ #endif
case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
retval = snd_compr_set_params(stream, arg);
break;
return 0;
}
- static int _snd_timer_stop(struct snd_timer_instance *timeri,
- int keep_flag, int event);
+ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
/*
* close a timer instance
spin_unlock_irq(&timer->lock);
mutex_lock(®ister_mutex);
list_del(&timeri->open_list);
- if (timer && list_empty(&timer->open_list_head) &&
+ if (list_empty(&timer->open_list_head) &&
timer->hw.close)
timer->hw.close(timer);
/* remove slave links */
spin_lock_irqsave(&timer->lock, flags);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ti, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event + 100, &tstamp, resolution);
spin_unlock_irqrestore(&timer->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
+ }
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
result = snd_timer_start_slave(timeri);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
timer = timeri->timer;
if (timer == NULL)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START)) {
+ result = -EBUSY;
+ goto unlock;
+ }
timeri->ticks = timeri->cticks = ticks;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
- static int _snd_timer_stop(struct snd_timer_instance * timeri,
- int keep_flag, int event)
+ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
{
struct snd_timer *timer;
unsigned long flags;
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
- if (!keep_flag) {
- spin_lock_irqsave(&slave_active_lock, flags);
- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- list_del_init(&timeri->ack_list);
- list_del_init(&timeri->active_list);
+ spin_lock_irqsave(&slave_active_lock, flags);
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
}
+ if (timeri->timer)
+ spin_lock(&timeri->timer->lock);
+ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ if (timeri->timer)
+ spin_unlock(&timeri->timer->lock);
+ spin_unlock_irqrestore(&slave_active_lock, flags);
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START))) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EBUSY;
+ }
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
}
}
}
- if (!keep_flag)
- timeri->flags &=
- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
unsigned long flags;
int err;
- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
+ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
if (err < 0)
return err;
timer = timeri->timer;
if (! timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ result = -EBUSY;
+ goto unlock;
+ }
if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
return result;
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
+ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
}
/*
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- if (--timer->running)
- list_del_init(&ti->active_list);
+ --timer->running;
+ list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
tu->tstamp = *tstamp;
if ((tu->filter & (1 << event)) == 0 || !tu->tread)
return;
+ memset(&r1, 0, sizeof(r1));
r1.event = event;
r1.tstamp = *tstamp;
r1.val = resolution;
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
+ memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
struct snd_timer_tread tread;
+ memset(&tread, 0, sizeof(tread));
tread.event = SNDRV_TIMER_EVENT_EARLY;
tread.tstamp.tv_sec = 0;
tread.tstamp.tv_nsec = 0;