spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and defines how many pages prior
+ to (for stacks growing down) or after (for stacks
+ growing up) the main stack are reserved for no other
+ mapping. The default value is 256 pages.
+
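As an illustration (a hypothetical command line, not part of this patch), booting a 4 KiB-page system with stack_guard_gap=512 doubles the gap to 512 pages, i.e. 2 MiB of address space next to the main stack that no other mapping may occupy.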
stacktrace [FTRACE]
Enable the stack tracer on boot up.
HIGHMEM regardless of setting
of CONFIG_HIGHPTE.
+ uuid_debug= (Boolean) whether to enable debugging of TuxOnIce's
+ uuid support.
+
vdso= [X86,SH]
vdso=2: enable compat VDSO (default with COMPAT_VDSO)
vdso=1: enable VDSO (default)
VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 106
+ SUBLEVEL = 107
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-Werror-implicit-function-declaration \
-Wno-format-security \
-fno-delete-null-pointer-checks \
- -std=gnu89
+ -w -std=gnu89
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
#define CPUID_MPIDR 5
+#define CPUID_REVIDR 6
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
#define ARM_CPU_PART_CORTEX_A5 0xC050
#define ARM_CPU_PART_CORTEX_A15 0xC0F0
#define ARM_CPU_PART_CORTEX_A7 0xC070
+#define ARM_CPU_PART_CORTEX_A12 0xC0D0
+#define ARM_CPU_PART_CORTEX_A17 0xC0E0
+#define ARM_CPU_PART_CORTEX_A53 0xD030
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+ /* Qualcomm implemented cores */
+ #define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
static void __init pm_init(void)
{
- cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+ if (has_ossr)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
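The 0xff00fff0 mask compares only the implementer and primary part number fields of the MIDR and deliberately ignores variant and revision, so every Scorpion stepping is caught. A minimal sketch of the same decode, assuming the standard ARM MIDR field layout (the helper name is illustrative, not from this patch):

/* MIDR: implementer[31:24] variant[23:20] arch[19:16] part[15:4] rev[3:0] */
static inline int cpu_is_scorpion(unsigned int midr)
{
	/* keep implementer (0x51, Qualcomm) and part number (0x02d) */
	return (midr & 0xff00fff0) == ARM_CPU_PART_SCORPION;
}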
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
atomic_dec(&modifying_ftrace_code);
}
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
{
- /* The return code is retured via data */
- *(unsigned long *)data = 0;
-
return 0;
}
#endif
unsigned long return_hooker = (unsigned long)
&return_to_handler;
+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
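The sign test works because x86 kernel virtual addresses live in the upper part of the address space and are negative when interpreted as a signed long; a frame address that compares >= 0 can only be the real-mode/physical-style pointer seen during early resume, so the tracer bails out before dereferencing anything stack-relative.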
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
#include <linux/kobj_map.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/ctype.h>
+#include <linux/fs_uuid.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
- disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
blk_put_queue(disk->queue);
kfree(disk);
}
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+ while((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
+
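With this hook installed, every uevent emitted for a whole-disk device carries its partition count. A disk with three partitions would show something like the following in its sysfs uevent file (illustrative output; the other keys are the usual ones added by the driver core and block layer):

MAJOR=8
MINOR=0
DEVNAME=sda
DEVTYPE=disk
NPARTS=3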
struct class block_class = {
.name = "block",
};
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
+ .uevent = disk_uevent,
};
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(invalidate_partition);
+dev_t blk_lookup_fs_info(struct fs_info *seek)
+{
+ dev_t devt = MKDEV(0, 0);
+ struct class_dev_iter iter;
+ struct device *dev;
+ int best_score = 0;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while (best_score < 3 && (dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+
+ while (best_score < 3 && (part = disk_part_iter_next(&piter))) {
+ int score = part_matches_fs_info(part, seek);
+ if (score > best_score) {
+ devt = part_devt(part);
+ best_score = score;
+ }
+ }
+ disk_part_iter_exit(&piter);
+ }
+ class_dev_iter_exit(&iter);
+ return devt;
+}
+EXPORT_SYMBOL_GPL(blk_lookup_fs_info);
+
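blk_lookup_fs_info() above keeps the best part_matches_fs_info() score seen so far and stops scanning as soon as a score of 3 turns up; the best_score < 3 guards imply a 0-3 scale where 3 is an exact match. part_matches_fs_info() itself is not part of this hunk, so treat that scale as inferred rather than documented.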
+/* Caller uses NULL, key to start. For each match found, we return a bdev on
+ * which we have done blkdev_get, and we do the blkdev_put on block devices
+ * that are passed to us. When no more matches are found, we return NULL.
+ */
+struct block_device *next_bdev_of_type(struct block_device *last,
+ const char *key)
+{
+ dev_t devt = MKDEV(0, 0);
+ struct class_dev_iter iter;
+ struct device *dev;
+ struct block_device *next = NULL, *bdev;
+ int got_last = 0;
+
+ if (!key)
+ goto out;
+
+ class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
+ while (!devt && (dev = class_dev_iter_next(&iter))) {
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+
+ while ((part = disk_part_iter_next(&piter))) {
+ bdev = bdget(part_devt(part));
+ if (last && !got_last) {
+ if (last == bdev)
+ got_last = 1;
+ continue;
+ }
+
+ if (blkdev_get(bdev, FMODE_READ, 0))
+ continue;
+
+ if (bdev_matches_key(bdev, key)) {
+ next = bdev;
+ break;
+ }
+
+ blkdev_put(bdev, FMODE_READ);
+ }
+ disk_part_iter_exit(&piter);
+ }
+ class_dev_iter_exit(&iter);
+out:
+ if (last)
+ blkdev_put(last, FMODE_READ);
+ return next;
+}
+EXPORT_SYMBOL_GPL(next_bdev_of_type);
+
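The get/put handoff described in the comment above makes the function usable as a plain cursor. A minimal caller sketch (the "swap" key and examine() helper are hypothetical; which keys match depends on bdev_matches_key(), which is not shown here):

struct block_device *bdev = NULL;

while ((bdev = next_bdev_of_type(bdev, "swap"))) {
	/* bdev is held with FMODE_READ; the next call puts it for us */
	examine(bdev);
}
/* a caller that breaks out of the loop early must blkdev_put() the
 * last bdev itself */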
/*
* Disk events - monitor disk events like media change and eject request.
*/
static const char *disk_events_strs[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "media_change",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "eject_request",
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)] = "media_disappear",
+#endif
};
static char *disk_uevents[] = {
[ilog2(DISK_EVENT_MEDIA_CHANGE)] = "DISK_MEDIA_CHANGE=1",
[ilog2(DISK_EVENT_EJECT_REQUEST)] = "DISK_EJECT_REQUEST=1",
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ [ilog2(DISK_EVENT_MEDIA_DISAPPEAR)] = "DISK_EVENT_MEDIA_DISAPPEAR=1",
+#endif
};
/* list of all disk_events */
static LIST_HEAD(disk_events);
/* disable in-kernel polling by default */
-static unsigned long disk_events_dfl_poll_msecs = 0;
+//ALPS00319570, CL955952 merged back, begin
+//static unsigned long disk_events_dfl_poll_msecs = 0; //original
+static unsigned long disk_events_dfl_poll_msecs = 2000;
+//ALPS00319570, CL955952 merged back, end
static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
+ CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+ CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
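Both additions degrade gracefully: $(call cc-option,<flag>) expands to the flag only when the target compiler accepts it and to nothing otherwise, so toolchains lacking these scheduler options still build. The referenced GCC bug (PR 79149) concerns instruction scheduling inflating the stack frames of these two files, which is why the workaround is applied per-object instead of globally.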
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
+obj-$(CONFIG_CRYPTO_LZ4K) += lz4kc.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if you are
+ very security conscious or memory is tight.
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
controlling the behavior of this hardware.
config DEVPORT
- bool
+ bool "/dev/port character device"
depends on ISA || PCI
default y
+ help
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
+config DCC_TTY
+ tristate "DCC tty driver"
+ depends on ARM
+
source "drivers/s390/char/Kconfig"
config MSM_SMD_PKT
}
#endif
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
+ static inline int page_is_allowed(unsigned long pfn)
+ {
+ return devmem_is_allowed(pfn);
+ }
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
return 1;
}
#else
+ static inline int page_is_allowed(unsigned long pfn)
+ {
+ return 1;
+ }
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
}
#endif
+#endif
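A note on the convention the callers below rely on: page_is_allowed() forwards devmem_is_allowed(), which this backport treats as tri-state. 0 denies the access (-EPERM), 1 permits it normally, and 2 lets the call succeed while reads are satisfied with zeroes and writes are silently dropped; the non-STRICT_DEVMEM stub above only ever returns 1.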
+#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
-
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
+
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
*ppos += written;
return written;
}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
}
return 0;
}
+#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
return file->f_pos = 0;
}
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
return ret;
}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
+#endif
#define zero_lseek null_lseek
#define full_lseek null_lseek
#define open_kmem open_mem
#define open_oldmem open_mem
+#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.open = open_mem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
+#ifdef CONFIG_DEVMEM
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/cputime.h>
#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
{
return cpufreq_driver->have_governor_per_policy;
}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
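A sketch of how a governor consumes these helpers; the prev_* bookkeeping is illustrative, not part of this hunk:

u64 wall, idle, d_wall, d_idle, load;

idle = get_cpu_idle_time(cpu, &wall, 0);	/* both in microseconds */
d_wall = wall - prev_wall;	/* prev_* saved at the previous sample */
d_idle = idle - prev_idle;
load = d_wall ? div64_u64(100 * (d_wall - d_idle), d_wall) : 0;
prev_wall = wall;
prev_idle = idle;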
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy->cpu);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
return err;
vol->updating = 0;
err = to_write;
- vfree(vol->upd_buf);
+ kfree(vol->upd_buf);
}
return err;
if (vol->upd_received == vol->upd_bytes) {
vol->changing_leb = 0;
err = count;
- vfree(vol->upd_buf);
+ kfree(vol->upd_buf);
}
return err;
}
if (tun->flags & TUN_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
if (gso.hdr_len > len)
return -EINVAL;
- offset += tun->vnet_hdr_sz;
+ offset += vnet_hdr_sz;
}
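Reading tun->vnet_hdr_sz exactly once is the point of the change above: TUNSETVNETHDRSZ can update the field concurrently, and the ACCESS_ONCE snapshot guarantees that the size validated against len is the same value later subtracted from len and added to offset, so the three uses can never disagree mid-call.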
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
int vnet_hdr_sz = 0;
if (tun->flags & TUN_VNET_HDR)
- vnet_hdr_sz = tun->vnet_hdr_sz;
+ vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
int vnet_hdr_sz;
int ret;
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+ if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+ return -EPERM;
+ }
+#endif
+
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+#ifdef RTC_LEGACY_ALARM_IMPL
+ return __rtc_read_alarm(rtc, alarm);
+#else
int err;
err = mutex_lock_interruptible(&rtc->ops_lock);
mutex_unlock(&rtc->ops_lock);
return err;
+#endif
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+#ifdef RTC_LEGACY_ALARM_IMPL
+ WARN(1, "__rtc_set_alarm() is not supported!!\n");
+ return -EPERM;
+#else
struct rtc_time tm;
long now, scheduled;
int err;
err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
return err;
+#endif
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
+#ifdef RTC_LEGACY_ALARM_IMPL
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+#else
+
if (rtc->aie_timer.enabled) {
rtc_timer_remove(rtc, &rtc->aie_timer);
}
if (alarm->enabled) {
err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
}
+#endif
mutex_unlock(&rtc->ops_lock);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
+int rtc_set_alarm_poweron(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ err = rtc_valid_tm(&alarm->time);
+ if (err != 0)
+ return err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
+ if (err)
+ return err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ mutex_unlock(&rtc->ops_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtc_set_alarm_poweron);
+
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
+#ifdef RTC_LEGACY_ALARM_IMPL
+ return 0;
+#else
int err;
struct rtc_time now;
}
mutex_unlock(&rtc->ops_lock);
return err;
+#endif
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
*/
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ struct rtc_time tm;
+ ktime_t now;
+
timer->enabled = 1;
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+
+ /* Skip over expired timers */
+ while (next) {
+ if (next->expires.tv64 >= now.tv64)
+ break;
+ next = timerqueue_iterate_next(next);
+ }
+
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ if (!next) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
*/
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ //add for sdcard hotplug start
+ if(1 == retval){
+ if(sdkp->old_media_present != sdkp->media_present){
+ retval |= sdkp->media_present ? 0 : DISK_EVENT_MEDIA_DISAPPEAR;
+ sdkp->old_media_present = sdkp->media_present;
+ }
+ }
+ //add for sdcard hotplug end
+#endif
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
* doesn't have any media in it, don't bother
* with any more polling.
*/
+ /*
if (media_not_present(sdkp, &sshdr))
return;
+ */
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
retries++;
- } while (retries < 3 &&
- (!scsi_status_is_good(the_result) ||
+ if(!scsi_status_is_good(the_result) ||
((driver_byte(the_result) & DRIVER_SENSE) &&
- sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
+ sense_valid && sshdr.sense_key == UNIT_ATTENTION)) {
+ msleep(100);
+ } else {
+ break;
+ }
+ } while (retries < 5);
if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
/* no sense, TUR either succeeded or failed
#define READ_CAPACITY_RETRIES_ON_RESET 10
+ /*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+ static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+ {
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+ }
+
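Worked example for sd_addressable_capacity(): with a 4096-byte logical block size the shift is ilog2(4096) - 9 = 3, so lba + 1 is scaled by 8 into 512-byte sectors; with a 32-bit sector_t (CONFIG_LBDAF unset) any result above U32_MAX sectors, roughly 2 TiB, is rejected as unaddressable.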
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
sd_revalidate_disk(gd);
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ sdkp->old_media_present = sdkp->media_present; //add for sdcard hotplug
+#endif
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
+#ifdef CONFIG_MTK_MULTI_PARTITION_MOUNT_ONLY_SUPPORT
+ gd->events |= DISK_EVENT_MEDIA_CHANGE|DISK_EVENT_MEDIA_DISAPPEAR; //add for sdcard hotplug
+#else
gd->events |= DISK_EVENT_MEDIA_CHANGE;
+#endif
}
blk_pm_runtime_init(sdp->request_queue, dev);
#include "cdc-acm.h"
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+
+/* adjust to 1 to avoid a musb bug when many writes are issued with clean URBs */
+#undef ACM_NW
+#define ACM_NW 1
+/* adjust the size to hsmaxp*20 so the tty line discipline accepts large writes */
+#define ACM_WB_SZ (512*20)
+
+#define DATA_DUMP_BYTES 15 // wx, how many bytes we'll print out for each packet
+#define DATA_DUMP_SIZE 64 // wx, must hold the prefix plus DATA_DUMP_BYTES hex bytes
+static unsigned char data_out[DATA_DUMP_SIZE];
+static unsigned char data_in[DATA_DUMP_SIZE];
+/* Debug functions */
+static int enable_debug = 0;
+static int enable_dump = 0;
+//original CDC-ACM log, more detailed
+#undef dev_dbg
+#define dev_dbg(dev, format, args...) \
+ do{ \
+ if(enable_debug) { \
+ dev_printk(KERN_WARNING, dev, "[CDC-ACM] " format, ##args); \
+ } \
+ }while(0)
+#undef dev_vdbg
+#define dev_vdbg dev_dbg
+//MTK added CDC-ACM log, more critical
+#define dbg_mtk(dev, format, args...) \
+ do{ \
+ dev_printk(KERN_WARNING, dev, "[CDC-ACM-MTK] " format "\n", ##args); \
+ }while(0)
+#else
+#define dbg_mtk(dev, format, args...) do{}while(0)
+#endif
+
#define DRIVER_AUTHOR "Armin Fuerst, Pavel Machek, Johannes Erdfelt, Vojtech Pavlik, David Kubicek, Johan Hovold"
#define DRIVER_DESC "USB Abstract Control Model driver for USB modems and ISDN adapters"
static DEFINE_MUTEX(acm_table_lock);
+#define MYDBG(fmt, args...) do {printk(KERN_WARNING "MTK_ACM, <%s(), %d> " fmt, __func__, __LINE__, ## args); }while(0)
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+
+#define RECORD_SZ 100
+#define DUMP_DELAY 3
+struct record_entry
+{
+ struct timeval cur_time;
+ u32 transfer_buffer_length;
+ u32 actual_length;
+ int status;
+ unsigned char data;
+
+}w1[RECORD_SZ], w2[RECORD_SZ], r1[RECORD_SZ], r2[RECORD_SZ];
+
+static int w1_idx, w2_idx, r1_idx, r2_idx;
+static int record_enable = 0;
+static struct timeval tv_start;
+void dump_record(void)
+{
+ int i, index_limit;
+ struct record_entry *ptr;
+
+ index_limit = w1_idx;
+ ptr = w1;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("w1, time:(%d,%d), reqlen:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, (unsigned int)ptr[i].transfer_buffer_length, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+ index_limit = r1_idx;
+ ptr = r1;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("r1, time:(%d,%d), reqlen:%d\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length);
+ mdelay(DUMP_DELAY);
+ }
+
+ index_limit = w2_idx;
+ ptr = w2;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("w2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+ index_limit = r2_idx;
+ ptr = r2;
+ for(i = 0; i < index_limit; i++){
+ MYDBG("r2, time:(%d,%d), reqlen:%d, actlen:%d, status:%d, data:%x\n",
+ (unsigned int)ptr[i].cur_time.tv_sec, (unsigned int)ptr[i].cur_time.tv_usec, ptr[i].transfer_buffer_length, ptr[i].actual_length, ptr[i].status, ptr[i].data);
+ mdelay(DUMP_DELAY);
+ }
+
+}
+void record_activity(struct urb *urb, int is_in, int is_complete)
+{
+ struct timeval tv_time;
+
+ if(!record_enable)
+ return;
+
+ do_gettimeofday(&tv_time);
+ tv_time.tv_sec = tv_time.tv_sec - tv_start.tv_sec;
+ tv_time.tv_usec = tv_time.tv_usec - tv_start.tv_usec;
+ if(is_in){
+ if(is_complete){
+ if(r2_idx >= RECORD_SZ)
+ return;
+
+ r2[r2_idx].cur_time = tv_time;
+ r2[r2_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ r2[r2_idx].actual_length = urb->actual_length;
+ r2[r2_idx].status = urb->status;
+ r2[r2_idx].data = *((unsigned char*)urb->transfer_buffer);
+ r2_idx++;
+ return;
+ }
+ if(r1_idx >= RECORD_SZ)
+ return;
+ r1[r1_idx].cur_time = tv_time;
+ r1[r1_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ r1[r1_idx].actual_length = urb->actual_length;
+ r1[r1_idx].status = urb->status;
+ r1_idx++;
+ }else{
+ if(is_complete){
+ if(w2_idx >= RECORD_SZ)
+ return;
+ w2[w2_idx].cur_time = tv_time;
+ w2[w2_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ w2[w2_idx].actual_length = urb->actual_length;
+ w2[w2_idx].status = urb->status;
+ w2[w2_idx].data = *((unsigned char*)urb->transfer_buffer);
+ w2_idx++;
+ return;
+ }
+ if(w1_idx >= RECORD_SZ)
+ return;
+ w1[w1_idx].cur_time = tv_time;
+ w1[w1_idx].transfer_buffer_length = urb->transfer_buffer_length;
+ w1[w1_idx].actual_length = urb->actual_length;
+ w1[w1_idx].status = urb->status;
+ w1[w1_idx].data = *((unsigned char*)urb->transfer_buffer);
+ w1_idx++;
+ }
+}
+
+bool usb_h_acm_all_clear(void)
+{
+ int i;
+ int count = 0;
+ for (i = 0; i < ACM_TTY_MINORS; i++) {
+ if(acm_table[i] != NULL) {
+ count++;
+ }
+ }
+ MYDBG("count<%d>\n", count);
+ return !count;
+}
+EXPORT_SYMBOL_GPL(usb_h_acm_all_clear);
+
+#define CHECK_INTERVAL 2
+#define CB_NUM 3
+extern unsigned long volatile jiffies;
+static unsigned long callback_check_timeout[CB_NUM];
+static char *callback_name[CB_NUM] = {
+ "acm_read_bulk_callback",
+ "acm_write_bulk",
+ "acm_ctrl_irq",
+};
+void mark_callback_alive(char *func_name, struct urb *urb, struct acm *acm)
+{
+ int i;
+ for(i = 0; i < CB_NUM ; i++)
+ {
+ if(!strcmp(func_name, callback_name[i])){
+ if(enable_debug || time_after(jiffies, callback_check_timeout[i]))
+ {
+ MYDBG("%s,ep(%d),len(%d,%d),data(%x),sts(%d), minor(%d)\n",
+ func_name,
+ urb->ep->desc.bEndpointAddress,
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ *((unsigned char*)urb->transfer_buffer),
+ urb->status,
+ acm->minor);
+ callback_check_timeout[i] = jiffies + HZ * CHECK_INTERVAL;
+ }
+ break;
+ }
+ }
+}
+
+
+#endif
+
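mark_callback_alive() above is effectively a per-callback ratelimiter: each of the three URB callbacks named in callback_name[] logs at most once every CHECK_INTERVAL (2) seconds unless enable_debug forces every event through, keeping the MYDBG traffic bounded on busy endpoints.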
/*
* acm_table accessors
*/
wb->urb->transfer_buffer_length = wb->len;
wb->urb->dev = acm->dev;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(wb->urb, 0, 0);
+#endif
rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
if (rc < 0) {
dev_err(&acm->data->dev,
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dev_dbg(&acm->control->dev,
+ dev_err(&acm->control->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
- dev_dbg(&acm->control->dev,
+ dev_err(&acm->control->dev,
"%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
usb_mark_last_busy(acm->dev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
{
int res;
- if (!test_and_clear_bit(index, &acm->read_urbs_free))
+ if (!test_and_clear_bit(index, &acm->read_urbs_free)){
return 0;
+ }
dev_vdbg(&acm->data->dev, "%s - urb %d\n", __func__, index);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(acm->read_urbs[index], 1, 0);
+#endif
+
res = usb_submit_urb(acm->read_urbs[index], mem_flags);
if (res) {
+ MYDBG("urb fail(%d)\n", res);
if (res != -EPERM) {
dev_err(&acm->data->dev,
"%s - usb_submit_urb failed: %d\n",
for (i = 0; i < acm->rx_buflimit; ++i) {
res = acm_submit_read_urb(acm, i, mem_flags);
- if (res)
+ if (res){
return res;
+ }
}
return 0;
static void acm_process_read_urb(struct acm *acm, struct urb *urb)
{
- if (!urb->actual_length)
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i, len;
+#endif
+ if (!urb->actual_length){
return;
+ }
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ len = sprintf(data_in, "DT-I: ");
+ for(i=0; i<urb->actual_length && i<DATA_DUMP_BYTES; i++) {
+ len += sprintf(data_in+len, "%02X ", *(((unsigned char *)(urb->transfer_buffer))+i));
+ }
+ sprintf(data_in+len, "\n");
+ dbg_mtk(&acm->data->dev, "%s", data_in);
+ }
+#endif
tty_insert_flip_string(&acm->port, urb->transfer_buffer,
urb->actual_length);
rb->index, urb->actual_length);
set_bit(rb->index, &acm->read_urbs_free);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(urb, 1, 1);
+#endif
+
if (!acm->dev) {
dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
return;
usb_mark_last_busy(acm->dev);
if (urb->status) {
- dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
+ dev_err(&acm->data->dev, "%s - non-zero urb status: %d\n",
__func__, urb->status);
return;
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
acm_process_read_urb(acm, urb);
/* throttle device if requested by tty */
struct acm *acm = wb->instance;
unsigned long flags;
- if (urb->status || (urb->actual_length != urb->transfer_buffer_length))
- dev_vdbg(&acm->data->dev, "%s - len %d/%d, status %d\n",
+
+ if (urb->status || (urb->actual_length != urb->transfer_buffer_length)){
+ dev_err(&acm->data->dev, "%s - len %d/%d, status %d, data(%x)\n",
__func__,
urb->actual_length,
urb->transfer_buffer_length,
- urb->status);
+ urb->status,
+ *((char *)(urb->transfer_buffer)));
+ }
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ record_activity(urb, 0, 1);
+#endif
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ mark_callback_alive(__func__, urb, acm);
+#endif
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
return retval;
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+extern struct usb_device *get_usb11_child_udev(void);
+extern int usb_autoresume_device(struct usb_device *udev);
+#endif
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
- struct acm *acm = tty->driver_data;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ struct usb_device *udev;
+ int result;
+#endif
+ struct acm *acm;
+ acm = tty->driver_data;
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
+ MYDBG("ctrl:%x, read:%x, write:%x\n",
+ (acm->control->cur_altsetting->endpoint[0].desc).bEndpointAddress,
+ (acm->data->cur_altsetting->endpoint[0].desc).bEndpointAddress,
+ (acm->data->cur_altsetting->endpoint[1].desc).bEndpointAddress);
+
+#define META_BIN_NAME "meta_tst"
+#define MDDOWNLOADER_BIN_NAME "downloader"
+
+ /* make sure usb1 is always alive in dl/meta mode */
+ if(!strcmp(META_BIN_NAME, current->comm) || !strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ udev = get_usb11_child_udev();
+ result = usb_autoresume_device(udev);
+ dbg_mtk(&acm->control->dev, "%s, auto result:%d", __func__, result);
+ }
+
+ if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ record_enable = 1;
+ w1_idx = w2_idx = r1_idx = r2_idx = 0;
+ do_gettimeofday(&tv_start);
+ }
+
+#else
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+#endif
dev_dbg(tty->dev, "%s\n", __func__);
return tty_port_open(&acm->port, tty, filp);
*/
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
acm->control->needs_remote_wakeup = 1;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+ acm->control->needs_remote_wakeup = 0;
+#endif
+#endif
acm->ctrlurb->dev = acm->dev;
- if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
+ retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
+ if (retval) {
dev_err(&acm->control->dev,
"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
goto error_submit_urb;
}
acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
- if (acm_set_control(acm, acm->ctrlout) < 0 &&
- (acm->ctrl_caps & USB_CDC_CAP_LINE))
+ retval = acm_set_control(acm, acm->ctrlout);
+ if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
goto error_set_control;
- usb_autopm_put_interface(acm->control);
-
/*
* Unthrottle device in case the TTY was closed while throttled.
*/
acm->throttle_req = 0;
spin_unlock_irq(&acm->read_lock);
- if (acm_submit_read_urbs(acm, GFP_KERNEL))
+ retval = acm_submit_read_urbs(acm, GFP_KERNEL);
+ if (retval)
goto error_submit_read_urbs;
+ usb_autopm_put_interface(acm->control);
+
mutex_unlock(&acm->mutex);
return 0;
error_get_interface:
disconnected:
mutex_unlock(&acm->mutex);
- return retval;
+
+ return usb_translate_errors(retval);
}
static void acm_port_destruct(struct tty_port *port)
{
struct acm *acm = tty->driver_data;
dev_dbg(&acm->control->dev, "%s\n", __func__);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ dbg_mtk(&acm->control->dev, "%s port_cnt=%d", __func__, acm->port.count);
+ if(!strcmp(MDDOWNLOADER_BIN_NAME, current->comm)){
+ record_enable = 0;
+ dump_record();
+ }
+#endif
tty_port_close(&acm->port, tty, filp);
}
unsigned long flags;
int wbn;
struct acm_wb *wb;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i, len;
+#endif
if (!count)
return 0;
dev_vdbg(&acm->data->dev, "%s - count %d\n", __func__, count);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ len = sprintf(data_out, "DT-O: ");
+ for(i=0; i<count && i<DATA_DUMP_BYTES; i++) {
+ len += sprintf(data_out+len, "%02X ", *(buf+i));
+ }
+ sprintf(data_out+len, "\n");
+ dbg_mtk(&acm->data->dev, "%s", data_out);
+ }
+#endif
spin_lock_irqsave(&acm->write_lock, flags);
wbn = acm_wb_alloc(acm);
if (wbn < 0) {
{
struct acm *acm = tty->driver_data;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ dbg_mtk(&acm->control->dev, "tiocmget ctrlin=%x\n", acm->ctrlin);
+ }
+#endif
return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
(acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
(acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
struct acm *acm = tty->driver_data;
unsigned int newctrl;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ if(enable_dump) {
+ dbg_mtk(&acm->control->dev, "tiocmset ctrlout=%x\n", acm->ctrlout);
+ }
+#endif
newctrl = acm->ctrlout;
set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
acm->combined_interfaces = combined_interfaces;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ acm->writesize = ACM_WB_SZ;
+#else
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
+#endif
acm->control = control_interface;
acm->data = data_interface;
acm->minor = minor;
struct acm *acm = usb_get_intfdata(intf);
int cnt;
+ dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
spin_lock_irq(&acm->read_lock);
spin_lock(&acm->write_lock);
if (PMSG_IS_AUTO(message)) {
spin_unlock_irq(&acm->read_lock);
return -EBUSY;
}
+ }else{
+ int i;
+ for (i = 0; i < ACM_NW; i++){
+ if(acm->wb[i].use){
+ spin_unlock(&acm->write_lock);
+ spin_unlock_irq(&acm->read_lock);
+ return -EBUSY;
+ }
+ }
}
+
cnt = acm->susp_count++;
spin_unlock(&acm->write_lock);
spin_unlock_irq(&acm->read_lock);
struct urb *urb;
int rv = 0;
+ dbg_mtk(&acm->control->dev, "%s intf=%d", __func__, intf->cur_altsetting->desc.bInterfaceNumber);
+
spin_lock_irq(&acm->read_lock);
spin_lock(&acm->write_lock);
* delayed error checking because we must
* do the write path at all cost
*/
- if (rv < 0)
+ if (rv < 0){
+ MYDBG("urb fail:%d\n", rv);
goto out;
+ }
rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
}
static int __init acm_init(void)
{
int retval;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ int i;
+#endif
acm_tty_driver = alloc_tty_driver(ACM_TTY_MINORS);
if (!acm_tty_driver)
return -ENOMEM;
acm_tty_driver->init_termios = tty_std_termios;
acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
HUPCL | CLOCAL;
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ /* wx, disable echo and other flags from the very beginning.
+ * Otherwise RILD would disable them by calling tcsetattr() after opening
+ * the tty port, leaving a gap between the open and the tcsetattr(). If the
+ * modem sends data during that gap, things go wrong.
+ */
+ acm_tty_driver->init_termios.c_iflag = 0;
+ acm_tty_driver->init_termios.c_oflag = 0;
+ acm_tty_driver->init_termios.c_lflag = 0;
+ for(i= 0; i < CB_NUM ; i++){
+ callback_check_timeout[i] = jiffies;
+ }
+#endif
tty_set_operations(acm_tty_driver, &acm_ops);
retval = tty_register_driver(acm_tty_driver);
module_init(acm_init);
module_exit(acm_exit);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+module_param(enable_debug, int, 0644);
+module_param(enable_dump, int, 0644);
+#endif
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-
#include "hub.h"
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+int is_musbfsh_rh(struct usb_device *udev);
+void set_icusb_sts_disconnect_done(void);
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+static struct usb_device *g_dsda_dev = NULL;
+
+#ifdef CONFIG_PM_RUNTIME
+struct usb_hub *usb11_hub = NULL;
+int is_musbfsh_rh(struct usb_device *udev);
+
+struct usb_device *get_usb11_child_udev(void)
+{
+ if(usb11_hub){
+ MYDBG("\n");
+ return usb11_hub->ports[0]->child;
+ }else{
+ MYDBG("\n");
+ return NULL;
+ }
+}
+#endif
+
+void dump_data(char *buf, int len)
+{
+ int i;
+ for(i =0 ; i< len ; i++)
+ {
+ MYDBG("data[%d]: %x\n", i, buf[i]);
+ }
+}
+
+void test_dsda_device_ep0(void)
+{
+
+ int ret;
+ char data_buf[256];
+ ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
+ USB_REQ_GET_DESCRIPTOR,
+ USB_DIR_IN,
+ USB_DT_DEVICE << 8,
+ 0,
+ data_buf,
+ 64,
+ USB_CTRL_GET_TIMEOUT);
+
+ if (ret < 0) {
+ MYDBG("test ep fail, ret : %d\n", ret);
+ }
+ else
+ {
+ MYDBG("test ep0 ok, ret : %d\n", ret);
+ dump_data(data_buf, ret);
+ }
+
+}
+
+void release_usb11_wakelock(void);
+static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
+{
+ char cmd[64];
+ int ret;
+
+ /* clamp to the buffer size so a long write cannot overflow cmd[] */
+ if (count > sizeof(cmd))
+ count = sizeof(cmd);
+
+ ret = copy_from_user((char *) &cmd, user_buffer, count);
+
+ if(ret != 0)
+ {
+ return -EFAULT;
+ }
+
+ /* apply action here */
+ if(cmd[0] == '0')
+ {
+ MYDBG("");
+ test_dsda_device_ep0();
+ }
+ if(cmd[0] == '1')
+ {
+ MYDBG("");
+ release_usb11_wakelock();
+ }
+
+ MYDBG("");
+
+ return count;
+}
+
+struct file_operations dsda_tmp_proc_fops = {
+ .write = dsda_tmp_proc_entry
+};
+
+
+void create_dsda_tmp_entry(void)
+{
+ struct proc_dir_entry *prEntry;
+
+ MYDBG("");
+
+ prEntry = proc_create("DSDA_TMP_ENTRY", 0660, 0, &dsda_tmp_proc_fops);
+ if (prEntry)
+ {
+ MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
+ }
+ else
+ {
+ MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
+ }
+}
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+extern int usbif_u3h_send_event(char* event) ;
+#include "otg_whitelist.h"
+#endif
+
+
static inline int hub_is_superspeed(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
+#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
+#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
+
+
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeed(hub->hdev))
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
+ MYDBG("");
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
- } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
+ } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { // bus powered
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
- if (remaining < hdev->maxchild * unit_load)
+ if (remaining < hdev->maxchild * unit_load){
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
+ }
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
struct usb_device *hdev;
struct usb_hub *hub;
+
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
+#endif
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("HUB_NOT_SUPPORTED");
+#endif
return -ENODEV;
}
#endif
struct usb_device *udev = *pdev;
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
+ struct timeval tv_begin, tv_end;
+ struct timeval tv_before, tv_after;
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ int is_icusb_rh;
+#endif
+
+ do_gettimeofday(&tv_begin);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ is_icusb_rh = is_musbfsh_rh(udev->parent);
+#endif
+
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+
+ do_gettimeofday(&tv_before);
usb_disable_device(udev, 0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port_dev->did_runtime_put = false;
}
+ do_gettimeofday(&tv_before);
usb_remove_ep_devs(&udev->ep0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
+ do_gettimeofday(&tv_before);
device_del(&udev->dev);
+ do_gettimeofday(&tv_after);
+ MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
hub_free_dev(udev);
put_device(&udev->dev);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ if (is_icusb_rh)
+ {
+ set_icusb_sts_disconnect_done();
+ MYDBG("ICUSB Disconnect\n");
+ }
+#endif
+ do_gettimeofday(&tv_end);
+ MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (udev->parent){ // we don't have to check ourselves (the root hub)
+ if (!is_targeted(udev)) {
+ usbif_u3h_send_event("DEV_NOT_SUPPORTED");
+ err = -ENOTSUPP;
+ }
+ }
+#endif
+
if (err < 0)
return err;
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
+ MYDBG("udev :%p\n", udev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+ if(is_musbfsh_rh(udev->parent)){
+ MYDBG("\n");
+ /*find out struct *usb_hub and hook it */
+ usb11_hub = usb_hub_to_struct_hub(udev->parent);
+ }
+#endif
+#endif
}
/* Tell the runtime-PM framework the device is active */
msleep(delay);
/* read and decode port status */
+ MYDBG("");
ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ MYDBG("");
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
+ MYDBG("");
if (status == -ENODEV) {
+ MYDBG("");
; /* The hub is gone */
} else if (status) {
+ MYDBG("");
dev_err(hub->intfdev,
"cannot %sreset port %d (err = %d)\n",
warm ? "warm " : "", port1, status);
} else {
+ MYDBG("");
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
- if (status && status != -ENOTCONN && status != -ENODEV)
+ if (status && status != -ENOTCONN)
+ {
+ MYDBG("");
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
+ }
}
+ MYDBG("");
/* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+ MYDBG("");
hub_port_finish_reset(hub, port1, udev, &status);
+ MYDBG("");
if (!hub_is_superspeed(hub->hdev))
goto done;
warm = true;
}
}
+ MYDBG("");
dev_dbg (hub->intfdev,
"port %d not enabled, trying %sreset again...\n",
port1, warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
+ MYDBG("");
+
dev_err (hub->intfdev,
"Cannot enable port %i. Maybe the USB cable is bad?\n",
done:
if (!hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
up_read(&ehci_cf_port_reset_rwsem);
+ }
+
+ MYDBG("");
return status;
}
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
+ {
+ MYDBG("");
goto err_wakeup;
+ }
}
}
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
+#if 0 /* behavior for kernel 3.10 */
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
- else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
- else {
+ } else {
really_suspend = false;
status = 0;
}
+#else /*roll back behavior to kernel 3.4 */
+ }else{
+ MYDBG("");
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
+ }
+#endif
+
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
+ MYDBG("");
/* Try to enable USB3 LPM and LTM again */
usb_unlocked_enable_lpm(udev);
*/
if (status == 0) {
devstatus = 0;
+ MYDBG("\n");
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ MYDBG("%d\n", status);
if (status >= 0)
status = (status > 0 ? 0 : -ENODEV);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
const char *speed;
int devnum = udev->devnum;
+ dump_stack();
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ MYDBG("");
retval = hub_port_reset(hub, port1, udev, delay, false);
+ MYDBG("");
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
default:
goto fail;
}
+ MYDBG("");
if (udev->speed == USB_SPEED_WIRELESS)
speed = "variable speed Wireless";
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ MYDBG("");
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
*/
if (r == 0 || (r == -ETIMEDOUT && j == 0))
break;
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (buf->bMaxPacketSize0 == 0) {
+ usbif_u3h_send_event("DEV_CONN_TMOUT");
+ }
+#endif
+
}
udev->descriptor.bMaxPacketSize0 =
buf->bMaxPacketSize0;
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
remaining -= delta;
}
if (remaining < 0) {
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub->intfdev, "%dmA over power budget!\n",
- remaining);
remaining = 0;
int status, i;
unsigned unit_load;
+ MYDBG("");
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
port1, portstatus, portchange, portspeed(hub, portstatus));
}
/* reset (non-USB 3.0 devices) and get descriptor */
+ MYDBG("");
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
+ {
+ MYDBG("");
goto loop;
+ }
+ MYDBG("");
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
hub->ports[port1 - 1]->child = NULL;
spin_unlock_irq(&device_state_lock);
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ g_dsda_dev = udev;
+ MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
+#endif
}
if (status)
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
}
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
dev_dbg (hub_dev, "resetting for error %d\n",
hub->error);
+ MYDBG("");
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg (hub_dev,
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
.supports_autosuspend = 1,
};
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head* list);
+#endif
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
return -1;
}
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
+#endif
+
khubd_task = kthread_run(hub_thread, NULL, "khubd");
if (!IS_ERR(khubd_task))
return 0;
int i, ret = 0;
int port1 = udev->portnum;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor)) {
dev_info(&udev->dev, "device firmware changed\n");
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
return 0;
-
+
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
/* #define VERBOSE_DEBUG */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
+
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb/composite.h>
#include <asm/unaligned.h>
+#include <linux/printk.h>
+
/*
* The code in this file is utility code, used to build a gadget driver
* from one or more "function" drivers, one or more "configuration"
ep_found:
/* commit results */
- _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
- _ep->mult = 0;
+ _ep->mult = 1;
+
+ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+ usb_endpoint_xfer_int(_ep->desc)))
+ _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1;
+
if (!want_comp_desc)
return 0;
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
- _ep->mult = comp_desc->bmAttributes & 0x3;
+ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
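Worked example for the high-speed bitfield math above (field layout per the USB 2.0 wMaxPacketSize definition, not taken from this patch): an isochronous endpoint reporting wMaxPacketSize 0x1400 has maxpacket = 0x1400 & 0x7ff = 1024 bytes and mult = ((0x1400 & 0x1800) >> 11) + 1 = 3 transactions per microframe.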
struct usb_function *function)
{
int value = -EINVAL;
+
+ pr_debug("[XLOG_DEBUG][USB][COM]%s: \n", __func__);
- DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
+ INFO(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
function->name, function,
config->label, config);
done:
if (value)
- DBG(config->cdev, "adding '%s'/%p --> %d\n",
+ INFO(config->cdev, "adding '%s'/%p --> %d\n",
function->name, function, value);
return value;
}
void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
{
- if (f->disable)
+ if (f->disable) {
+ INFO(c->cdev, "disable function '%s'/%p\n", f->name, f);
f->disable(f);
+ }
bitmap_zero(f->endpoints, 32);
list_del(&f->list);
- if (f->unbind)
+ if (f->unbind) {
+ INFO(c->cdev, "unbind function '%s'/%p\n", f->name, f);
f->unbind(c, f);
+ }
}
EXPORT_SYMBOL_GPL(usb_remove_function);
usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
+#ifdef CONFIG_USBIF_COMPLIANCE
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT) | cpu_to_le32(USB_BESL_SUPPORT) ;
+#else
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+#endif
/*
* The Superspeed USB Capability descriptor shall be implemented by all
DBG(cdev, "reset config\n");
list_for_each_entry(f, &cdev->config->functions, list) {
+ INFO(cdev, "disable function '%s'/%p\n", f->name, f);
if (f->disable)
f->disable(f);
struct usb_function, list);
list_del(&f->list);
if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n",
+ INFO(cdev, "unbind function '%s'/%p\n",
f->name, f);
f->unbind(config, f);
/* may free memory for "f" */
}
}
list_del(&config->list);
+ pr_debug("[XLOG_DEBUG][USB][COM]bind fialed and the list should be init because there is one entry only");
config->cdev = NULL;
} else {
unsigned i;
- DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
+ INFO(cdev, "cfg %d/%p speeds:%s%s%s\n",
config->bConfigurationValue, config,
config->superspeed ? " super" : "",
config->highspeed ? " high" : "",
}
EXPORT_SYMBOL_GPL(usb_add_config);
-static void remove_config(struct usb_composite_dev *cdev,
+static void unbind_config(struct usb_composite_dev *cdev,
struct usb_configuration *config)
{
while (!list_empty(&config->functions)) {
struct usb_function, list);
list_del(&f->list);
if (f->unbind) {
- DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+ INFO(cdev, "unbind function '%s'/%p\n", f->name, f);
f->unbind(config, f);
/* may free memory for "f" */
}
}
- list_del(&config->list);
if (config->unbind) {
- DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+ INFO(cdev, "unbind config '%s'/%p\n", config->label, config);
config->unbind(config);
/* may free memory for "c" */
}
+
+ /* reset all driver data to prevent leakage of ep allocation */
+ usb_ep_autoconfig_reset(cdev->gadget);
}
/**
if (cdev->config == config)
reset_config(cdev);
+
+ if (config->cdev != NULL) {
+ list_del(&config->list);
+ } else {
+ DBG(cdev, "%s: config->list has already been deleted\n", __func__);
+ }
+
spin_unlock_irqrestore(&cdev->lock, flags);
- remove_config(cdev, config);
+ unbind_config(cdev, config);
}
/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
-static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
+void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
if (req->status || req->actual != req->length)
DBG((struct usb_composite_dev *) ep->driver_data,
req->status, req->actual, req->length);
}
+EXPORT_SYMBOL_GPL(composite_setup_complete);
+
/*
* The setup() callback implements all the ep0 functionality that's
* not handled lower down, in hardware or the hardware driver(like
struct usb_function *f = NULL;
u8 endp;
+ pr_debug("[XLOG_DEBUG][USB][COM]%s bRequest=0x%X\n",
+ __func__, ctrl->bRequest);
+
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
* when we delegate to it.
if (ctrl->bRequestType != USB_DIR_IN)
goto unknown;
switch (w_value >> 8) {
-
+#ifdef CONFIG_USBIF_COMPLIANCE
+ case USB_DT_OTG:
+ {
+ struct usb_otg_descriptor *otg_desc = req->buf;
+ otg_desc->bLength = sizeof(*otg_desc);
+ otg_desc->bDescriptorType = USB_DT_OTG;
+ otg_desc->bmAttributes = USB_OTG_SRP | USB_OTG_HNP;
+ otg_desc->bcdOTG = cpu_to_le16(0x0200);
+ value = min_t(int, w_length,sizeof(struct usb_otg_descriptor));
+ }
+ break;
+#endif
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
value = min(w_length, (u16) sizeof cdev->desc);
memcpy(req->buf, &cdev->desc, value);
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_DEVICE, value=%d\n",value);
break;
case USB_DT_DEVICE_QUALIFIER:
if (!gadget_is_dualspeed(gadget) ||
device_qual(cdev);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_DEVICE_QUALIFIER, value=%d\n",value);
break;
case USB_DT_OTHER_SPEED_CONFIG:
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_OTHER_SPEED_CONFIG\n");
if (!gadget_is_dualspeed(gadget) ||
gadget->speed >= USB_SPEED_SUPER)
break;
value = config_desc(cdev, w_value);
if (value >= 0)
value = min(w_length, (u16) value);
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_CONFIG, value=%d\n",value);
break;
case USB_DT_STRING:
value = get_string(cdev, req->buf,
w_index, w_value & 0xff);
- if (value >= 0)
+ if (value >= 0) {
value = min(w_length, (u16) value);
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_STRING, value=%d\n" ,value);
+ }
break;
case USB_DT_BOS:
if (gadget_is_superspeed(gadget)) {
value = bos_desc(cdev);
value = min(w_length, (u16) value);
}
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR: "
+ "USB_DT_BOS, value=%d\n",value);
+ break;
+ default:
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_DESCRIPTOR w_value=0x%X\n", w_value);
break;
}
break;
spin_lock(&cdev->lock);
value = set_config(cdev, ctrl, w_value);
spin_unlock(&cdev->lock);
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_SET_CONFIGURATION: "
+ "value=%d\n",value);
break;
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != USB_DIR_IN)
goto unknown;
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
- f = cdev->config->interface[intf];
+
+ if (cdev->config)
+ f = cdev->config->interface[intf];
+ else
+ pr_debug("%s: cdev->config = NULL \n", __func__);
+
if (!f)
break;
* interface of the function
*/
case USB_REQ_GET_STATUS:
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_GET_STATUS\n");
if (!gadget_is_superspeed(gadget))
goto unknown;
if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
*/
case USB_REQ_CLEAR_FEATURE:
case USB_REQ_SET_FEATURE:
+ pr_debug("[XLOG_DEBUG][USB][COM]%s w_value=%d\n",
+ ((ctrl->bRequest == USB_REQ_SET_FEATURE) ? "USB_REQ_SET_FEATURE" : "USB_REQ_CLEAR_FEATURE"), w_value);
if (!gadget_is_superspeed(gadget))
goto unknown;
if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
goto unknown;
switch (w_value) {
case USB_INTRF_FUNC_SUSPEND:
+ pr_debug("[COM]USB_INTRF_FUNC_SUSPEND\n");
if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
break;
f = cdev->config->interface[intf];
break;
}
break;
+ case USB_REQ_SET_SEL:
+ pr_debug("[XLOG_DEBUG][USB][COM]USB_REQ_SET_SEL Pretend success\n");
+ value = 0;
+ break;
default:
unknown:
VDBG(cdev,
}
done:
+ if (value < 0) {
+ pr_debug("[XLOG_DEBUG][USB][COM]composite_setup: value=%d, "
+ "bRequestType=0x%x, bRequest=0x%x, w_value=0x%x, w_length=0x%x\n", value,
+ ctrl->bRequestType, ctrl->bRequest, w_value, w_length);
+ }
/* device either stalls (value < 0) or reports success */
return value;
}
reset_config(cdev);
if (cdev->driver->disconnect)
cdev->driver->disconnect(cdev);
+
+ /* ALPS00235316 and ALPS00234976 */
+ /* reset the completion callback */
+ if (cdev->req->complete) {
+ pr_debug("[XLOG_DEBUG][USB][COM]%s: reassigning the completion callback\n", __func__);
+ cdev->req->complete = composite_setup_complete;
+ }
+
spin_unlock_irqrestore(&cdev->lock, flags);
}
struct usb_configuration *c;
c = list_first_entry(&cdev->configs,
struct usb_configuration, list);
- remove_config(cdev, c);
+ list_del(&c->list);
+ unbind_config(cdev, c);
}
if (cdev->driver->unbind && unbind_driver)
cdev->driver->unbind(cdev);
if (!driver || !driver->dev || !driver->bind)
return -EINVAL;
+ pr_debug("[XLOG_DEBUG][USB][COM]%s: driver->name = %s", __func__, driver->name);
+
if (!driver->name)
driver->name = "composite";
*/
/* #define VERBOSE_DEBUG */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
-
+#include <linux/printk.h>
#include "u_serial.h"
#include "gadget_chips.h"
+#define ACM_LOG "USB_ACM"
+
/*
* This CDC ACM function support just wraps control functions and
* notifications around the generic serial-over-usb code.
* nothing unless we control a real RS232 line.
*/
acm->port_line_coding = *value;
+
+ pr_debug("[XLOG_INFO][USB_ACM] %s: rate=%d, stop=%d, parity=%d, data=%d\n", __func__, \
+ acm->port_line_coding.dwDTERate, acm->port_line_coding.bCharFormat, \
+ acm->port_line_coding.bParityType, acm->port_line_coding.bDataBits);
}
}
* to them by stalling. Options include get/set/clear comm features
* (not that useful) and SEND_BREAK.
*/
+
+ pr_debug("[XLOG_INFO][USB_ACM]%s: ttyGS%d req%02x.%02x v%04x i%04x len=%d\n", __func__, \
+ acm->port_num, ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length);
+
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
/* SET_LINE_CODING ... just read and save what the host sends */
value = min_t(unsigned, w_length,
sizeof(struct usb_cdc_line_coding));
memcpy(req->buf, &acm->port_line_coding, value);
+
+ pr_debug("[XLOG_INFO][USB_ACM]%s: rate=%d,stop=%d,parity=%d,data=%d\n", __func__, \
+ acm->port_line_coding.dwDTERate, acm->port_line_coding.bCharFormat, \
+ acm->port_line_coding.bParityType, acm->port_line_coding.bDataBits);
+
break;
/* SET_CONTROL_LINE_STATE ... save what the host sent */
struct f_acm *acm = func_to_acm(f);
struct usb_composite_dev *cdev = f->config->cdev;
- DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
+ INFO(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
gserial_disconnect(&acm->port);
usb_ep_disable(acm->notify);
acm->notify->driver_data = NULL;
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
DBG(cdev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
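The serial_state hunk above is a byte-order fix: acm->serial_state is kept in CPU endianness, but the CDC SERIAL_STATE notification travels on the wire in little-endian form, so the value is converted into a temporary before being handed to acm_cdc_notify(). A hedged userspace illustration; cpu_to_le16_demo below is a portable stand-in for the kernel's cpu_to_le16():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-in for cpu_to_le16(): build the little-endian byte pattern
     * explicitly so the result is right on any host. */
    static uint16_t cpu_to_le16_demo(uint16_t v)
    {
        uint8_t bytes[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
        uint16_t out;

        memcpy(&out, bytes, sizeof(out));
        return out;
    }

    int main(void)
    {
        uint16_t serial_state = 0x0003; /* e.g. DCD | DSR */
        uint16_t wire = cpu_to_le16_demo(serial_state);
        uint8_t buf[2];

        memcpy(buf, &wire, sizeof(buf));
        printf("wire bytes: %02x %02x\n", buf[0], buf[1]); /* 03 00 on any host */
        return 0;
    }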
if (status)
goto fail;
+ pr_debug("[XLOG_INFO][USB_ACM]%s: ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", \
+ __func__, acm->port_num, \
+ gadget_is_superspeed(c->cdev->gadget) ? "super" : \
+ gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", \
+ acm->port.in->name, acm->port.out->name, acm->notify->name);
+
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
gadget_is_superspeed(c->cdev->gadget) ? "super" :
#include <linux/dmapool.h>
#include "xhci.h"
+#include <mach/mt_boot.h>
+#include <linux/dma-mapping.h>
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
/* Set chain bit for isoc rings on AMD 0.96 host */
+#ifndef CONFIG_MTK_XHCI
if (xhci_link_trb_quirk(xhci) ||
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
+#endif
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- dma_free_coherent(&pdev->dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
- stream_ctx, dma);
+ dma_free_coherent(dev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
- return dma_alloc_coherent(&pdev->dev,
- sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
- dma, mem_flags);
+ return dma_alloc_coherent(dev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
xhci->devs[slot_id] = NULL;
}
+ /*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the child first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+ {
+ struct xhci_virt_device *vdev;
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ int i;
+
+ vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
+
+ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* is this a hub device that added a tt_info to the tts list */
+ if (tt_info->slot_id == slot_id) {
+ /* are any devices using this tt_info? */
+ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ vdev = xhci->devs[i];
+ if (vdev && (vdev->tt_info == tt_info))
+ xhci_free_virt_devices_depth_first(
+ xhci, i);
+ }
+ }
+ }
+ /* we are now at a leaf device */
+ xhci_free_virt_device(xhci, slot_id);
+ }
+
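A toy illustration of the ordering constraint the new comment describes: any device that still points at a hub's tt_info must be freed before the hub itself, so the walk recurses into children first. The parent-index layout below is invented for the sketch and is not xHCI's actual data structure:

    #include <stdio.h>

    #define NSLOTS 6

    /* parent[i] is the slot of the hub that device i hangs off, or -1. */
    static int parent[NSLOTS] = { -1, -1, 1, 1, 2, -1 };
    static int freed[NSLOTS];

    static void free_depth_first(int slot)
    {
        int i;

        if (freed[slot])
            return;
        /* free every child that still references this slot first */
        for (i = 0; i < NSLOTS; i++)
            if (!freed[i] && parent[i] == slot)
                free_depth_first(i);
        freed[slot] = 1;
        printf("freed slot %d\n", slot);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NSLOTS; i++)
            free_depth_first(i);
        return 0; /* slot 4 comes out before its hub 2, and 2 and 3 before 1 */
    }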
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
+ {
+ CHIP_SW_VER sw_code = mt_get_chip_sw_ver();
+ unsigned int hw_code = mt_get_chip_hw_code();
+
+ if ((hw_code == 0x6595) && (sw_code <= CHIP_SW_VER_01)) {
+ /* workaround for the RXXE max-packet-size issue */
+ if ((max_packet % 4 == 2) && (max_packet % 16 != 14) &&
+ (max_burst == 0) && usb_endpoint_dir_in(&ep->desc))
+ max_packet += 2;
+ }
break;
+ }
default:
BUG();
}
{
int num_sp;
int i;
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (!xhci->scratchpad)
return;
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
- dma_free_coherent(&pdev->dev, xhci->page_size,
+ dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
- dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
+ dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct dev_info *dev_info, *next;
struct xhci_cd *cur_cd, *next_cd;
unsigned long flags;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
- dma_free_coherent(&pdev->dev, size,
+ dma_free_coherent(dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
xhci_dbg(xhci, "Freed ERST\n");
}
}
- for (i = 1; i < MAX_HC_SLOTS; ++i)
- xhci_free_virt_device(xhci, i);
+ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ xhci_free_virt_devices_depth_first(xhci, i);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci_dbg(xhci, "Freed medium stream array pool\n");
if (xhci->dcbaa)
- dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
+ dma_free_coherent(dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
+#ifdef CONFIG_MTK_XHCI
+#include <linux/xhci/xhci-mtk.h>
+#include <linux/of.h>
+#endif
+
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
/*
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+ /* CC: the MTK host controller gives a spurious successful event
+ * after a short transfer; ignore it. */
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_LPM_SUPPORT;
}
/* called during probe() after chip reset completes */
.bus_resume = xhci_bus_resume,
};
+#if defined(CONFIG_MTK_LM_MODE)
+#define XHCI_DMA_BIT_MASK DMA_BIT_MASK(64)
+#else
+#define XHCI_DMA_BIT_MASK DMA_BIT_MASK(32)
+#endif
+
+static u64 xhci_dma_mask = XHCI_DMA_BIT_MASK;
+
+static void xhci_hcd_release(struct device *dev)
+{
+ printk(KERN_INFO "dev = 0x%p\n", dev);
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct hc_driver *driver;
driver = &xhci_plat_xhci_driver;
+#ifdef CONFIG_MTK_XHCI /* device tree support */
+ irq = platform_get_irq_byname(pdev, XHCI_DRIVER_NAME);
+ printk("%s(%d): %d\n", __func__, __LINE__, irq);
+ if (irq < 0)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, XHCI_BASE_REGS_ADDR_RES_NAME);
+ if (!res)
+ return -ENODEV;
+
+ pdev->dev.coherent_dma_mask = XHCI_DMA_BIT_MASK;
+ pdev->dev.dma_mask = &xhci_dma_mask;
+ pdev->dev.release = xhci_hcd_release;
+#else
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
+#endif
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
goto release_mem_region;
}
+ printk("%s(%d): logic 0x%p, phys 0x%p\n", __func__, __LINE__,
+ (void *)(unsigned long)res->start, hcd->regs);
+
+ #ifdef CONFIG_MTK_XHCI
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_TRIGGER_LOW);
+ #else
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ #endif
+
if (ret)
goto unmap_registers;
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
+ #ifdef CONFIG_MTK_XHCI
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_TRIGGER_LOW);
+ #else
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+ #endif
if (ret)
goto put_usb3_hcd;
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+ #ifdef CONFIG_MTK_XHCI
+ mtk_xhci_reset(xhci);
+ #endif
kfree(xhci);
return 0;
}
+#ifdef CONFIG_MTK_XHCI
+static const struct of_device_id mtk_xhci_of_match[] = {
+ {
+ .compatible = "mediatek,USB3_XHCI",
+ },
+ { },
+};
+#endif
+
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.driver = {
.name = "xhci-hcd",
+#ifdef CONFIG_MTK_XHCI
+ .of_match_table = of_match_ptr(mtk_xhci_of_match),
+#endif
},
};
MODULE_ALIAS("platform:xhci-hcd");
#include "xhci.h"
+#ifdef CONFIG_MTK_XHCI
+#include <asm/uaccess.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/xhci/xhci-mtk-scheduler.h>
+#include <linux/xhci/xhci-mtk-power.h>
+#include <linux/xhci/xhci-mtk.h>
+
+#ifdef CONFIG_USBIF_COMPLIANCE
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/kobject.h>
+#include <linux/miscdevice.h>
+
+static struct miscdevice mu3h_uevent_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "usbif_u3h_uevent",
+ .fops = NULL,
+};
+#endif
+#endif
+
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+#ifdef CONFIG_USBIF_COMPLIANCE
+int usbif_u3h_send_event(char *event)
+{
+ char udev_event[128];
+ char *envp[] = { udev_event, NULL };
+ int ret;
+
+ snprintf(udev_event, 128, "USBIF_EVENT=%s", event);
+ printk("usbif_u3h_send_event: sending event %s in %s\n", udev_event, kobject_get_path(&mu3h_uevent_device.this_device->kobj, GFP_KERNEL));
+ ret = kobject_uevent_env(&mu3h_uevent_device.this_device->kobj, KOBJ_CHANGE, envp);
+ if (ret < 0)
+ printk("usbif_u3h_send_event: sending failed, ret = %d\n", ret);
+
+ return ret;
+}
+#endif
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+ /* clear state flags. Including dying, halted or removing */
+ xhci->xhc_state = 0;
return ret;
}
} else {
xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
}
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg(xhci, "Finished xhci_init\n");
xhci_halt(xhci);
return -ENODEV;
}
+
xhci->shared_hcd->state = HC_STATE_RUNNING;
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
xhci_ring_cmd_db(xhci);
xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+
return 0;
}
xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
__func__);
}
-
+#ifndef CONFIG_MTK_XHCI
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
-
+#endif
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
u32 drop_flag;
u32 new_add_flags, new_drop_flags, new_slot_info;
int ret;
+#ifdef CONFIG_MTK_XHCI
+ struct sch_ep *sch_ep = NULL;
+ int isTT;
+ int ep_type = 0;
+#endif
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0)
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+#ifdef CONFIG_MTK_XHCI
+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx);
+ isTT = ((slot_ctx->tt_info & 0xff) > 0);
+ if (usb_endpoint_xfer_int(&ep->desc))
+ ep_type = USB_EP_INT;
+ else if (usb_endpoint_xfer_isoc(&ep->desc))
+ ep_type = USB_EP_ISOC;
+ else if (usb_endpoint_xfer_bulk(&ep->desc))
+ ep_type = USB_EP_BULK;
+ sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
+ isTT, ep_type, (mtk_u32 *)ep);
+ if (sch_ep != NULL)
+ kfree(sch_ep);
+ else
+ xhci_warn(xhci, "[MTK] ep_sch instance not found when removing endpoint\n");
+#endif
+
xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags,
(unsigned int) new_slot_info);
+
+ #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_ep_count_dec();
+ #endif
+
return 0;
}
u32 new_add_flags, new_drop_flags, new_slot_info;
struct xhci_virt_device *virt_dev;
int ret = 0;
+#ifdef CONFIG_MTK_XHCI
+ struct xhci_ep_ctx *in_ep_ctx;
+ struct sch_ep *sch_ep;
+ int isTT;
+ int ep_type = 0;
+ int maxp = 0;
+ int burst = 0;
+ int mult = 0;
+ int interval = 0;
+#endif
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
if (ret <= 0) {
return -ENOMEM;
}
+#ifdef CONFIG_MTK_XHCI
+ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+
+ isTT = ((slot_ctx->tt_info & 0xff) > 0);
+ if (usb_endpoint_xfer_int(&ep->desc))
+ ep_type = USB_EP_INT;
+ else if (usb_endpoint_xfer_isoc(&ep->desc))
+ ep_type = USB_EP_ISOC;
+ else if (usb_endpoint_xfer_bulk(&ep->desc))
+ ep_type = USB_EP_BULK;
+ if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH
+ || udev->speed == USB_SPEED_LOW) {
+ /* wMaxPacketSize is __le16; convert before masking */
+ maxp = le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7FF;
+ burst = le16_to_cpu(ep->desc.wMaxPacketSize) >> 11;
+ mult = 0;
+ } else if (udev->speed == USB_SPEED_SUPER) {
+ maxp = le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7FF;
+ burst = ep->ss_ep_comp.bMaxBurst;
+ mult = ep->ss_ep_comp.bmAttributes & 0x3;
+ }
+ interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff));
+ sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL);
+ if (!sch_ep)
+ return -ENOMEM;
+ if (mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc),
+ isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep,
+ (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS) {
+ xhci_err(xhci, "[MTK] not enough bandwidth\n");
+ kfree(sch_ep);
+ return -ENOSPC;
+ }
+#endif
+
ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
(unsigned int) new_drop_flags,
(unsigned int) new_add_flags,
(unsigned int) new_slot_info);
+
+ #if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_ep_count_inc();
+ #endif
+
return 0;
}
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
+#ifndef CONFIG_USB_DEFAULT_PERSIST
struct device *dev = hcd->self.controller;
+#endif
unsigned long flags;
u32 state;
int i, ret;
return 0;
}
+#ifdef CONFIG_MTK_XHCI
+ retval = mtk_xhci_ip_init(hcd, xhci);
+ if (retval)
+ goto error;
+#endif
+
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
if (retval)
goto error;
xhci_dbg(xhci, "Called HCD init\n");
+
+ printk("%s(%d): do mtk_xhci_set\n", __func__, __LINE__);
+
return 0;
error:
kfree(xhci);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
+#ifdef CONFIG_USBIF_COMPLIANCE
+#ifndef CONFIG_USB_MTK_DUALMODE
+static int xhci_hcd_driver_init(void)
+{
+ int retval;
+
+ retval = xhci_register_pci();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem registering PCI driver.");
+ return retval;
+ }
+
+ #ifdef CONFIG_MTK_XHCI
+ mtk_xhci_ip_init();
+ #endif
+
+ retval = xhci_register_plat();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem registering platform driver.");
+ goto unreg_pci;
+ }
+
+ #ifdef CONFIG_MTK_XHCI
+ retval = xhci_attrs_init();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem creating xhci attributes.");
+ goto unreg_plat;
+ }
+
+ mtk_xhci_wakelock_init();
+ #endif
+
+ /*
+ * Check the compiler generated sizes of structures that must be laid
+ * out in specific ways for hardware access.
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+ /* xhci_device_control has eight fields, and also
+ * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+ */
+ BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+ BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ return 0;
+
+#ifdef CONFIG_MTK_XHCI
+unreg_plat:
+ xhci_unregister_plat();
+#endif
+unreg_pci:
+ xhci_unregister_pci();
+ return retval;
+}
+
+static void xhci_hcd_driver_cleanup(void)
+{
+ xhci_unregister_pci();
+ xhci_unregister_plat();
+ xhci_attrs_exit();
+}
+#else
+static int xhci_hcd_driver_init(void)
+{
+ /* initialized in mt_devs.c */
+ mtk_xhci_eint_iddig_init();
+ mtk_xhci_switch_init();
+ /* mtk_xhci_wakelock_init(); */
+ return 0;
+}
+
+static void xhci_hcd_driver_cleanup(void)
+{
+ mtk_xhci_eint_iddig_deinit();
+}
+
+#endif
+
+static int mu3h_normal_driver_on = 0;
+
+static int xhci_mu3h_proc_show(struct seq_file *seq, void *v)
+{
+ seq_printf(seq, "xhci_mu3h_proc_show, mu3h is %d (on:1, off:0)\n", mu3h_normal_driver_on);
+ return 0;
+}
+
+static int xhci_mu3h_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xhci_mu3h_proc_show, inode->i_private);
+}
+
+static ssize_t xhci_mu3h_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos)
+{
+ char msg[32];
+
+ if (length >= sizeof(msg)) {
+ printk("xhci_mu3h_proc_write: invalid length %zu\n", length);
+ return -EINVAL;
+ }
+ if (copy_from_user(msg, buf, length))
+ return -EFAULT;
+
+ msg[length] = 0;
+
+ printk("xhci_mu3h_proc_write: %s, current driver on/off: %d\n", msg, mu3h_normal_driver_on);
+
+ if ((msg[0] == '1') && (mu3h_normal_driver_on == 0)) {
+ xhci_hcd_driver_init();
+ mu3h_normal_driver_on = 1;
+ printk("registered the mu3h xhci driver\n");
+ } else if ((msg[0] == '0') && (mu3h_normal_driver_on == 1)) {
+ xhci_hcd_driver_cleanup();
+ mu3h_normal_driver_on = 0;
+ printk("unregistered the mu3h xhci driver\n");
+ } else {
+ printk("xhci_mu3h_proc_write: invalid input\n");
+ }
+ return length;
+}
+
+static const struct file_operations mu3h_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = xhci_mu3h_proc_open,
+ .write = xhci_mu3h_proc_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int __init xhci_hcd_init(void)
+{
+ struct proc_dir_entry *prEntry;
+
+ printk(KERN_DEBUG "xhci_hcd_init");
+
+ /* set xHCI up at boot */
+ xhci_hcd_driver_init() ;
+ mtk_xhci_wakelock_init();
+ mu3h_normal_driver_on = 1;
+
+ /* USBIF */
+ prEntry = proc_create("mu3h_driver_init", 0666, NULL, &mu3h_proc_fops);
+ if (prEntry)
+ printk("created the mu3h init proc entry\n");
+ else
+ printk("[ERROR] failed to create the mu3h init proc entry\n");
+
+#ifdef CONFIG_MTK_XHCI
+
+ if (!misc_register(&mu3h_uevent_device))
+ printk("registered the mu3h_uevent_device misc device\n");
+ else
+ printk("[ERROR] failed to register the mu3h_uevent_device misc device\n");
+
+#endif
+
+ return 0;
+}
+module_init(xhci_hcd_init);
+
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_MTK_XHCI
+ misc_deregister(&mu3h_uevent_device);
+#endif
+ printk(KERN_DEBUG "xhci_hcd_cleanup");
+}
+module_exit(xhci_hcd_cleanup);
+
+#else
+#ifndef CONFIG_USB_MTK_DUALMODE
static int __init xhci_hcd_init(void)
{
int retval;
printk(KERN_DEBUG "Problem registering platform driver.");
goto unreg_pci;
}
+
+ #ifdef CONFIG_MTK_XHCI
+ retval = xhci_attrs_init();
+ if (retval < 0) {
+ printk(KERN_DEBUG "Problem creating xhci attributes.");
+ goto unreg_plat;
+ }
+
+ mtk_xhci_wakelock_init();
+ #endif
+
/*
* Check the compiler generated sizes of structures that must be laid
* out in specific ways for hardware access.
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
return 0;
+
+#ifdef CONFIG_MTK_XHCI
+unreg_plat:
+ xhci_unregister_plat();
+#endif
unreg_pci:
xhci_unregister_pci();
return retval;
{
xhci_unregister_pci();
xhci_unregister_plat();
+ xhci_attrs_exit();
}
module_exit(xhci_hcd_cleanup);
+#else
+static int __init xhci_hcd_init(void)
+{
+ mtk_xhci_eint_iddig_init();
+ mtk_xhci_switch_init();
+ mtk_xhci_wakelock_init();
+ return 0;
+}
+module_init(xhci_hcd_init);
+
+static void __exit xhci_hcd_cleanup(void)
+{
+}
+module_exit(xhci_hcd_cleanup);
+
+#endif
+#endif
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
+#ifdef CONFIG_MTK_XHCI
+/* mtk scheduler bitmasks */
+#define BPKTS(p) ((p) & 0x3f)
+#define BCSCOUNT(p) (((p) & 0x7) << 8)
+#define BBM(p) ((p) << 11)
+#define BOFFSET(p) ((p) & 0x3fff)
+#define BREPEAT(p) (((p) & 0x7fff) << 16)
+#endif
/**
* struct xhci_input_control_context
/* Our HCD's current interrupter register set */
struct xhci_intr_reg __iomem *ir_set;
+ #ifdef CONFIG_MTK_XHCI
+ unsigned long base_regs;
+ unsigned long sif_regs;
+ unsigned long sif2_regs;
+ #endif
+
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
__u32 hcs_params2;
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+ #define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
+#ifndef CONFIG_MTK_XHCI
#define XHCI_AMD_PLL_FIX (1 << 3)
+#endif
#define XHCI_SPURIOUS_SUCCESS (1 << 4)
/*
* Certain Intel host controllers have a limit to the number of endpoint
#define XHCI_BROKEN_MSI (1 << 6)
#define XHCI_RESET_ON_RESUME (1 << 7)
#define XHCI_SW_BW_CHECKING (1 << 8)
+#ifndef CONFIG_MTK_XHCI
#define XHCI_AMD_0x96_HOST (1 << 9)
+#endif
#define XHCI_TRUST_TX_LENGTH (1 << 10)
#define XHCI_LPM_SUPPORT (1 << 11)
#define XHCI_INTEL_HOST (1 << 12)
/* TODO: copied from ehci.h - can be refactored? */
/* xHCI spec says all registers are little endian */
static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
- __le32 __iomem *regs)
+ void __iomem *regs)
{
return readl(regs);
}
static inline void xhci_writel(struct xhci_hcd *xhci,
- const unsigned int val, __le32 __iomem *regs)
+ const unsigned int val, void __iomem *regs)
{
writel(val, regs);
}
#include "truncate.h"
#include <trace/events/ext4.h>
+#include <linux/blkdev.h>
#define MPAGE_DA_EXTENT_TAIL 0x01
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
- EXT4_INODE_SIZE(inode->i_sb) -
- offset);
}
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) - offset);
}
return csum;
struct page *page;
pgoff_t index;
unsigned from, to;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ extern unsigned char *page_logger;
+ struct page_pid_logger *tmp_logger;
+ unsigned long page_index;
+ extern spinlock_t g_locker;
+ unsigned long g_flags;
+#endif
trace_ext4_write_begin(inode, pos, len, flags);
/*
return ret;
}
*pagep = page;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ if( page_logger && (*pagep)) {
+ //#if defined(CONFIG_FLATMEM)
+ //page_index = (unsigned long)((*pagep) - mem_map) ;
+ //#else
+ page_index = (unsigned long)(__page_to_pfn(*pagep))- PHYS_PFN_OFFSET;
+ //#endif
+ tmp_logger =((struct page_pid_logger *)page_logger) + page_index;
+ spin_lock_irqsave(&g_locker, g_flags);
+ if( page_index < num_physpages) {
+ if( tmp_logger->pid1 == 0XFFFF)
+ tmp_logger->pid1 = current->pid;
+ else if( tmp_logger->pid1 != current->pid)
+ tmp_logger->pid2 = current->pid;
+ }
+ spin_unlock_irqrestore(&g_locker, g_flags);
+ }
+#endif
return ret;
}
int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
+#if 0
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
/*
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
+#ifdef FEATURE_STORAGE_META_LOG
+ if( bh && bh->b_bdev && bh->b_bdev->bd_disk)
+ set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
+#endif
submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
}
static inline int ext4_issue_discard(struct super_block *sb,
- ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+ ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+ unsigned long flags)
{
ext4_fsblk_t discard_block;
count = EXT4_C2B(EXT4_SB(sb), count);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
- return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+ return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
}
/*
if (test_opt(sb, DISCARD)) {
err = ext4_issue_discard(sb, entry->efd_group,
entry->efd_start_cluster,
- entry->efd_count);
+ entry->efd_count, 0);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%d failed"
if (ar->pright && start + size - 1 >= ar->lright)
size -= start + size - ar->lright;
+ /*
+ * Trim allocation request for filesystems with artificially small
+ * groups.
+ */
+ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
+ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
+
end = start + size;
/* check we don't cross already preallocated blocks */
* them with group lock_held
*/
if (test_opt(sb, DISCARD)) {
- err = ext4_issue_discard(sb, block_group, bit, count);
+ err = ext4_issue_discard(sb, block_group, bit, count,
+ 0);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%lu failed"
* @count: number of blocks to TRIM
* @group: alloc. group we are working with
* @e4b: ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
*
* Trim "count" blocks starting at "start" in the "group". To assure that no
* one will allocate those blocks, mark it as used in buddy bitmap. This must
* be called with under the group lock.
*/
static int ext4_trim_extent(struct super_block *sb, int start, int count,
- ext4_group_t group, struct ext4_buddy *e4b)
+ ext4_group_t group, struct ext4_buddy *e4b,
+ unsigned long blkdev_flags)
{
struct ext4_free_extent ex;
int ret = 0;
*/
mb_mark_used(e4b, &ex);
ext4_unlock_group(sb, group);
- ret = ext4_issue_discard(sb, group, start, count);
+ ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
return ret;
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
+ * @blkdev_flags: flags for the block device
*
* ext4_trim_all_free walks through group's buddy bitmap searching for free
* extents. When the free block is found, ext4_trim_extent is called to TRIM
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
- ext4_grpblk_t minblocks)
+ ext4_grpblk_t minblocks, unsigned long blkdev_flags)
{
void *bitmap;
ext4_grpblk_t next, count = 0, free_count = 0;
if ((next - start) >= minblocks) {
ret = ext4_trim_extent(sb, start,
- next - start, group, &e4b);
+ next - start, group, &e4b,
+ blkdev_flags);
if (ret && ret != -EOPNOTSUPP)
break;
ret = 0;
* ext4_trim_fs() -- trim ioctl handle function
* @sb: superblock for filesystem
* @range: fstrim_range structure
+ * @blkdev_flags: flags for the block device
*
* start: First Byte to trim
* len: number of Bytes to trim from start
* start to start+len. For each such a group ext4_trim_all_free function
* is invoked to trim all free space.
*/
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+ unsigned long blkdev_flags)
{
struct ext4_group_info *grp;
ext4_group_t group, first_group, last_group;
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen);
+ end, minlen, blkdev_flags);
if (cnt < 0) {
ret = cnt;
break;
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ int aborted = 0;
int i, err;
ext4_unregister_li_request(sb);
destroy_workqueue(sbi->dio_unwritten_wq);
if (sbi->s_journal) {
+ aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
- if (err < 0)
+ if ((err < 0) && !aborted)
ext4_abort(sb, "Couldn't clean up the journal");
}
ext4_ext_release(sb);
ext4_xattr_put_super(sb);
- if (!(sb->s_flags & MS_RDONLY)) {
+ if (!(sb->s_flags & MS_RDONLY) && !aborted) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
}
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
+ set_freezable();
cont_thread:
while (true) {
schedule_timeout_interruptible(next_wakeup - cur);
- if (kthread_should_stop()) {
+ if (kthread_freezable_should_stop(NULL)) {
ext4_clear_request_list();
goto exit_thread;
}
ext4_set_bit(s++, buf);
count++;
}
- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
- count++;
+ j = ext4_bg_num_gdb(sb, grp);
+ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_error(sb, "Invalid number of block group "
+ "descriptor blocks: %d", j);
+ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
}
+ count += j;
+ for (; j > 0; j--)
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
}
if (!count)
return 0;
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out_free_orig;
+ if ((data && !orig_data) || !sbi)
+ goto out_free_base;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
- if (!sbi->s_blockgroup_lock) {
- kfree(sbi);
- goto out_free_orig;
- }
+ if (!sbi->s_blockgroup_lock)
+ goto out_free_base;
+
sb->s_fs_info = sbi;
sbi->s_sb = sb;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
- &journal_devnum, &journal_ioprio, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- sbi->s_es->s_mount_opts);
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+ GFP_KERNEL);
+ if (!s_mount_opts)
+ goto failed_mount;
+ if (!parse_options(s_mount_opts, sb, &journal_devnum,
+ &journal_ioprio, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ }
+ kfree(s_mount_opts);
}
sbi->s_def_mount_opt = sbi->s_mount_opt;
if (!parse_options((char *) data, sb, &journal_devnum,
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+ sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
}
sbi->s_cluster_ratio = clustersize / blocksize;
- if (sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#inodes per group too big: %lu",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
-
/* Do we have standard group size of clustersize * 8 blocks ? */
if (sbi->s_blocks_per_group == clustersize << 3)
set_opt2(sb, STD_GROUP_SIZE);
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) {
+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+ ext4_msg(sb, KERN_WARNING,
+ "first meta block group too large: %u "
+ "(group descriptor block count %u)",
+ le32_to_cpu(es->s_first_meta_bg), db_count);
+ goto failed_mount;
+ }
+ }
sbi->s_group_desc = ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
*/
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
- if (ext4_load_journal(sb, es, journal_devnum))
+ err = ext4_load_journal(sb, es, journal_devnum);
+ if (err)
goto failed_mount3;
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
}
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ "Opts: %.*s%s%s", descr,
+ (int) sizeof(sbi->s_es->s_mount_opts),
+ sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+ out_free_base:
kfree(sbi);
- out_free_orig:
kfree(orig_data);
return err ? err : ret;
}
struct page **pagep, void **fsdata)
{
int err;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ extern unsigned char *page_logger;
+ struct page_pid_logger *tmp_logger;
+ unsigned long page_index;
+ extern spinlock_t g_locker;
+ unsigned long g_flags;
+#endif
*pagep = NULL;
err = cont_write_begin(file, mapping, pos, len, flags,
pagep, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ if( page_logger && (*pagep)) {
+ //printk(KERN_INFO"fat write_begin hank logger count:%d init %x currentpid:%d page:%x mem_map:%x pfn:%d page->index:%d\n", num_physpages, page_logger, current->pid, *pagep, mem_map, (unsigned)((*pagep) - mem_map), (*pagep)->index);
+ //printk(KERN_INFO"page_logger_lock:%x %d", page_logger_lock, ((num_physpages+(1<<PAGE_LOCKER_SHIFT)-1)>>PAGE_LOCKER_SHIFT));
+ //#if defined(CONFIG_FLATMEM)
+ //page_index = (unsigned long)((*pagep) - mem_map) ;
+ //#else
+ page_index = (unsigned long)(__page_to_pfn(*pagep))- PHYS_PFN_OFFSET;
+ //#endif
+ tmp_logger =((struct page_pid_logger *)page_logger) + page_index;
+ spin_lock_irqsave(&g_locker, g_flags);
+ if( page_index < num_physpages) {
+ if( tmp_logger->pid1 == 0XFFFF)
+ tmp_logger->pid1 = current->pid;
+ else if( tmp_logger->pid1 != current->pid)
+ tmp_logger->pid2 = current->pid;
+ }
+ spin_unlock_irqrestore(&g_locker, g_flags);
+ //printk(KERN_INFO"tmp logger pid1:%u pid2:%u pfn:%d page:%x pos:%x host:%x max_mapnr:%x\n", tmp_logger->pid1, tmp_logger->pid2, (unsigned long)((*pagep) - mem_map),(*pagep), pos, mapping->host, max_mapnr );
+ //printk(KERN_INFO"tmp logger pid1:%u pid2:%u pfn:%lu page:%p\n", tmp_logger->pid1, tmp_logger->pid2, (unsigned long)(__page_to_pfn(*pagep)),(*pagep));
+ }
+#endif
if (err < 0)
fat_write_failed(mapping, pos + len);
return err;
mark_buffer_dirty(bh);
err = 0;
if (wait)
+ {
err = sync_dirty_buffer(bh);
+ }else
+ {
+#ifdef FEATURE_STORAGE_META_LOG
+ if( bh && bh->b_bdev && bh->b_bdev->bd_disk)
+ set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, NOWAIT_WRITE_CNT);
+#endif
+ }
brelse(bh);
return err;
}
return 0;
}
+ static void fat_dummy_inode_init(struct inode *inode)
+ {
+ /* Initialize this dummy inode to work as no-op. */
+ MSDOS_I(inode)->mmu_private = 0;
+ MSDOS_I(inode)->i_start = 0;
+ MSDOS_I(inode)->i_logstart = 0;
+ MSDOS_I(inode)->i_attrs = 0;
+ MSDOS_I(inode)->i_pos = 0;
+ }
+
static int fat_read_root(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
struct fat_boot_sector *b;
+ struct fat_boot_bsx *bsx;
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
goto out_fail;
}
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
if (!IS_FSINFO(fsinfo)) {
fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
}
brelse(fsinfo_bh);
+ } else {
+ bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
}
+ /* interpret volume ID as a little endian 32 bit integer */
+ sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+ ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- MSDOS_I(fat_inode)->i_pos = 0;
+ fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
+ fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
- freezable_schedule_timeout_killable(*timeout);
+ freezable_schedule_timeout_killable_unsafe(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
*/
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.rpc_argp = &args,
.rpc_resp = &res,
};
- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
int ret = -ENOMEM, i;
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- if (npages == 0)
- npages = 1;
if (npages > ARRAY_SIZE(pages))
return -ERANGE;
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- freezable_schedule_timeout_killable(timeout);
+ freezable_schedule_timeout_killable_unsafe(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
}
#endif
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+ const char __user *name = vma_get_anon_name(vma);
+ struct mm_struct *mm = vma->vm_mm;
+
+ unsigned long page_start_vaddr;
+ unsigned long page_offset;
+ unsigned long num_pages;
+ unsigned long max_len = NAME_MAX;
+ int i;
+
+ page_start_vaddr = (unsigned long)name & PAGE_MASK;
+ page_offset = (unsigned long)name - page_start_vaddr;
+ num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+ seq_puts(m, "[anon:");
+
+ for (i = 0; i < num_pages; i++) {
+ int len;
+ int write_len;
+ const char *kaddr;
+ long pages_pinned;
+ struct page *page;
+
+ pages_pinned = get_user_pages(current, mm, page_start_vaddr,
+ 1, 0, 0, &page, NULL);
+ if (pages_pinned < 1) {
+ seq_puts(m, "<fault>]");
+ return;
+ }
+
+ kaddr = (const char *)kmap(page);
+ len = min(max_len, PAGE_SIZE - page_offset);
+ write_len = strnlen(kaddr + page_offset, len);
+ seq_write(m, kaddr + page_offset, write_len);
+ kunmap(page);
+ put_page(page);
+
+ /* if strnlen hit a null terminator then we're done */
+ if (write_len != len)
+ break;
+
+ max_len -= len;
+ page_offset = 0;
+ page_start_vaddr += PAGE_SIZE;
+ }
+
+ seq_putc(m, ']');
+}
+
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma && vma != priv->tail_vma) {
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
pad_len_spaces(m, len);
seq_printf(m, "[stack:%d]", tid);
}
+ goto done;
+ }
+
+ if (vma_get_anon_name(vma)) {
+ pad_len_spaces(m, len);
+ seq_print_vma_name(m, vma);
}
}
unsigned long swap;
unsigned long nonlinear;
u64 pss;
+ u64 pswap;
};
+#ifdef CONFIG_SWAP
+extern struct swap_info_struct *swap_info_get(swp_entry_t entry);
+extern void swap_info_unlock(struct swap_info_struct *si);
+#endif /* CONFIG_SWAP */
+
+static inline unsigned char swap_count(unsigned char ent)
+{
+ return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
+}
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
unsigned long ptent_size, struct mm_walk *walk)
} else if (is_swap_pte(ptent)) {
swp_entry_t swpent = pte_to_swp_entry(ptent);
- if (!non_swap_entry(swpent))
+ if (!non_swap_entry(swpent)) {
+#ifdef CONFIG_SWAP
+ swp_entry_t entry;
+ struct swap_info_struct *p;
+#endif /* CONFIG_SWAP */
+
mss->swap += ptent_size;
- else if (is_migration_entry(swpent))
+
+#ifdef CONFIG_SWAP
+ entry = pte_to_swp_entry(ptent);
+ if (non_swap_entry(entry))
+ return;
+ p = swap_info_get(entry);
+ if (p) {
+ int swapcount = swap_count(p->swap_map[swp_offset(entry)]);
+ if (swapcount == 0)
+ swapcount = 1;
+ mss->pswap += (ptent_size << PSS_SHIFT) / swapcount;
+ swap_info_unlock(p);
+ }
+#endif /* CONFIG_SWAP */
+ } else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
} else if (pte_file(ptent)) {
if (pte_to_pgoff(ptent) != pgoff)
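The "PSwap" accounting added above mirrors the existing Pss logic: each swapped page contributes its size divided by the number of references to its swap slot, accumulated in 1/2^PSS_SHIFT units so the fractions survive integer division until the final kB conversion in show_smap(). A rough userspace sketch; a PSS_SHIFT of 12 matches fs/proc/task_mmu.c, and the reference counts are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define PSS_SHIFT 12
    #define PAGE_SIZE 4096UL

    int main(void)
    {
        /* three swapped pages, shared by 1, 2 and 3 processes respectively */
        unsigned int swapcounts[] = { 1, 2, 3 };
        uint64_t pswap = 0;
        unsigned int i;

        for (i = 0; i < 3; i++)
            pswap += ((uint64_t)PAGE_SIZE << PSS_SHIFT) / swapcounts[i];

        /* same conversion as the show_smap() hunk below: drop the
         * fixed-point bits and convert bytes to kB */
        printf("PSwap: %llu kB\n", (unsigned long long)(pswap >> (10 + PSS_SHIFT)));
        return 0; /* 4 + 2 + 1.33 kB truncates to 7 kB */
    }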
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
+ "PSwap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
"Locked: %8lu kB\n",
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
mss.swap >> 10,
+ (unsigned long)(mss.pswap >> (10 + PSS_SHIFT)),
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
(vma->vm_flags & VM_LOCKED) ?
show_smap_vma_flags(m, vma);
+ if (vma_get_anon_name(vma)) {
+ seq_puts(m, "Name: ");
+ seq_print_vma_name(m, vma);
+ seq_putc(m, '\n');
+ }
+
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
}
#endif
+extern void kvfree(const void *addr);
+
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
- /* Is the vma a continuation of the stack vma above it? */
- static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
- {
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
- }
-
- static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
- {
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
- }
-
- /* Is the vma a continuation of the stack vma below it? */
- static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
- {
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
- }
-
- static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
- {
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
- }
-
extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *);
+ struct mempolicy *, const char __user *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
struct address_space *mapping,
struct file *filp);
+ extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
return vma;
}
+ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+ {
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+ }
+
+ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+ {
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+ }
+
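These helpers are where the stack_guard_gap parameter documented at the top of this patch takes effect: the configured gap is folded into a VMA's effective extent, and the unsigned arithmetic is clamped at the edges of the address space instead of wrapping. A userspace sketch of the grows-down case under the default 256-page gap:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT; /* default: 256 pages */

    /* Mirrors vm_start_gap() for a VM_GROWSDOWN mapping: extend the start
     * downwards by the gap, clamping to 0 if the subtraction wraps. */
    static unsigned long vm_start_gap_demo(unsigned long vm_start)
    {
        unsigned long gap_start = vm_start - stack_guard_gap;

        if (gap_start > vm_start) /* unsigned wrap-around */
            gap_start = 0;
        return gap_start;
    }

    int main(void)
    {
        printf("%#lx\n", vm_start_gap_demo(0x7ffff000UL)); /* start minus 1 MiB */
        printf("%#lx\n", vm_start_gap_demo(0x10000UL));    /* would wrap: clamped to 0 */
        return 0;
    }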
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long shrink_slab(struct shrink_control *shrink,
unsigned long nr_pages_scanned,
unsigned long lru_pages);
+void drop_pagecache(void);
#ifndef CONFIG_MMU
#define randomize_va_space 0
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
- file = shmem_file_setup(name, size, acctflag);
+ file = shmem_file_setup(name, size, acctflag, 0);
}
error = PTR_ERR(file);
if (IS_ERR(file))
* "raddr" thing points to kernel space, and there has to be a wrapper around
* this.
*/
- long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
- unsigned long shmlba)
+ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+ ulong *raddr, unsigned long shmlba)
{
struct shmid_kernel *shp;
unsigned long addr;
goto out;
else if ((addr = (ulong)shmaddr)) {
if (addr & (shmlba - 1)) {
- if (shmflg & SHM_RND)
- addr &= ~(shmlba - 1); /* round down */
+ /*
+ * Round down to the nearest multiple of shmlba.
+ * For sane do_mmap_pgoff() parameters, avoid
+ * round downs that trigger nil-page and MAP_FIXED.
+ */
+ if ((shmflg & SHM_RND) && addr >= shmlba)
+ addr &= ~(shmlba - 1);
else
#ifndef __ARCH_FORCE_SHMLBA
if (addr & ~PAGE_MASK)
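A quick worked example of the rounding rule above (the SHMLBA value is an
illustrative assumption; it is architecture-defined):

#include <assert.h>

int main(void)
{
	unsigned long shmlba = 0x4000;	/* e.g. a 16 KiB SHMLBA */
	unsigned long addr = 0x12345;	/* misaligned attach address */

	assert(addr & (shmlba - 1));
	if (addr >= shmlba)		/* the new guard */
		addr &= ~(shmlba - 1);	/* rounds down to 0x10000 */
	assert(addr == 0x10000);

	/*
	 * An address below shmlba used to round down to 0 and then be
	 * passed to do_mmap_pgoff() as MAP_FIXED at the nil page; the
	 * added "addr >= shmlba" test rejects that case instead.
	 */
	return 0;
}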
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
+#include <linux/freezer.h>
#include <asm/futex.h>
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
- schedule();
+ freezable_schedule();
}
__set_current_state(TASK_RUNNING);
}
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
if (res)
ret = (res < 0) ? res : 0;
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
return 0;
}
- __initcall(futex_init);
+ core_initcall(futex_init);
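Switching futex_init() to core_initcall() moves it from initcall level 6
(what plain __initcall()/module_init() expand to) up to level 1, so the futex
hash table is ready long before device drivers run. A hedged sketch of code
that relies on that ordering (the consumer itself is hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>

static int __init futex_ready_demo(void)
{
	/*
	 * device_initcall (level 6) runs after core_initcall (level 1),
	 * so by this point futex_init() has already set up its state.
	 */
	pr_info("futexes usable at device_initcall time\n");
	return 0;
}
device_initcall(futex_ready_demo);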
* 01Mar01 Andrew Morton
*/
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/memblock.h>
#include <linux/aio.h>
#include <linux/syscalls.h>
+#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/utsname.h>
+#include <linux/mt_sched_mon.h>
+#include <linux/aee.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+/* MTK printk options */
+#define LOG_TOO_MUCH_WARNING
+#ifdef LOG_TOO_MUCH_WARNING
+static int log_in_resume;
+#endif
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
+bool printk_disable_uart = false;
+static DEFINE_PER_CPU(char, printk_state);
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
/* We show everything that is MORE important than this.. */
#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
-#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
+#define DEFAULT_CONSOLE_LOGLEVEL 6 /* anything MORE serious than KERN_INFO */
int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
+EXPORT_SYMBOL_GPL(console_printk);
/*
* Low level drivers may need that to know if they can schedule in
static size_t syslog_partial;
/* index and sequence number of the first record stored in the buffer */
-static u64 log_first_seq;
-static u32 log_first_idx;
+/*static*/ u64 log_first_seq;
+/*static*/ u32 log_first_idx;
/* index and sequence number of the next record to store in the buffer */
-static u64 log_next_seq;
-static u32 log_next_idx;
+/*static*/ u64 log_next_seq;
+/*static*/ u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
+
+extern int mt_need_uart_console;
+inline void mt_disable_uart(void)
+{
+ if (mt_need_uart_console == 0) {
+ printk("<< printk console disable >>\n");
+ printk_disable_uart = 1;
+ } else {
+ printk("<< printk console can't be disabled >>\n");
+ }
+}
+inline void mt_enable_uart(void)
+{
+ if (mt_need_uart_console == 1) {
+ if (printk_disable_uart == 0)
+ return;
+ printk_disable_uart = 0;
+ printk("<< printk console enable >>\n");
+ } else {
+ printk("<< printk console can't be enabled >>\n");
+ }
+}
+
+#endif
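A hedged usage sketch for the two helpers above; the calling driver and its
critical section are hypothetical:

#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
extern void mt_disable_uart(void);
extern void mt_enable_uart(void);

static void timing_sensitive_section(void)
{
	mt_disable_uart();	/* UART output costs ~10us per character */
	/* ... work that must not stall behind console draining ... */
	mt_enable_uart();	/* no-op unless the disable was allowed */
}
#endif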
/* cpu currently holding logbuf_lock */
static volatile unsigned int logbuf_cpu = UINT_MAX;
{
struct log *msg;
u32 size, pad_len;
-
+ int this_cpu = smp_processor_id();
+ char state = __raw_get_cpu_var(printk_state);
+ char tbuf[50];
+ unsigned tlen;
+
+ if (state == 0) {
+ __raw_get_cpu_var(printk_state) = ' ';
+ state = ' ';
+ }
+ /* build the per-message "state(cpu)[pid:comm]" prefix */
+ if (console_suspended == 0) {
+ tlen = snprintf(tbuf, sizeof(tbuf), "%c(%x)[%d:%s]",
+ state, this_cpu, current->pid, current->comm);
+ } else {
+ tlen = snprintf(tbuf, sizeof(tbuf), "%c%x)", state, this_cpu);
+ }
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
+ size = sizeof(struct log) + text_len + tlen + dict_len;
pad_len = (-size) & (LOG_ALIGN - 1);
size += pad_len;
/* fill message */
msg = (struct log *)(log_buf + log_next_idx);
- memcpy(log_text(msg), text, text_len);
+ memcpy(log_text(msg), tbuf, tlen);
+ memcpy(log_text(msg) + tlen, text, text_len);
+ text_len += tlen;
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
msg->dict_len = dict_len;
static bool printk_time;
#endif
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
+module_param_named(disable_uart, printk_disable_uart, bool, S_IRUGO | S_IWUSR);
static size_t print_time(u64 ts, char *buf)
{
rem_nsec = do_div(ts, 1000000000);
if (!buf)
- return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
+ return snprintf(NULL, 0, "[%5lu.000000]", (unsigned long)ts);
- return sprintf(buf, "[%5lu.%06lu] ",
+ return sprintf(buf, "[%5lu.%06lu]",
(unsigned long)ts, rem_nsec / 1000);
}
{
struct console *con;
- trace_console(text, len);
+ trace_console_rcuidle(text, len);
if (level >= console_loglevel && !ignore_loglevel)
return;
return;
for_each_console(con) {
+ if (printk_disable_uart && (con->flags & CON_CONSDEV))
+ continue;
if (exclusive_console && con != exclusive_console)
continue;
if (!(con->flags & CON_ENABLED))
unsigned long flags;
int this_cpu;
int printed_len = 0;
-
+ int in_irq_disable, in_non_preempt;
+ in_irq_disable = irqs_disabled();
+ in_non_preempt = in_atomic();
+ /* clear the static text buffer before the message is formatted below */
+ memset(text, 0x0, sizeof(textbuf));
boot_delay_msec(level);
printk_delay();
}
}
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+ printascii(text);
+#endif
+
if (level == -1)
level = default_message_loglevel;
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
+#ifdef CONFIG_PRINTK_PROCESS_INFO
+ if (in_irq_disable)
+ __raw_get_cpu_var(printk_state) = '-';
+#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
+ else if (printk_disable_uart == 0)
+ __raw_get_cpu_var(printk_state) = '.';
+#endif
+ else
+ __raw_get_cpu_var(printk_state) = ' ';
+#endif
+
if (!(lflags & LOG_NEWLINE)) {
/*
* Flush the conflicting buffer. An earlier newline was missing,
console_lock();
console_suspended = 1;
up(&console_sem);
+ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
}
+EXPORT_SYMBOL_GPL(suspend_console);
void resume_console(void)
{
if (!console_suspend_enabled)
return;
down(&console_sem);
+ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
console_suspended = 0;
- console_unlock();
+ /* the LOG_TOO_MUCH_WARNING resume instrumentation (log_in_resume)
+ * is currently disabled, so resume simply unlocks the console */
+ console_unlock();
}
+EXPORT_SYMBOL_GPL(resume_console);
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
*
* console_unlock(); may be called from any context.
*/
+#ifdef LOG_TOO_MUCH_WARNING
+static int console_log_max = 400000;
+static int already_skip_log;
+#endif
void console_unlock(void)
{
static char text[LOG_LINE_MAX + PREFIX_MAX];
unsigned long flags;
bool wake_klogd = false;
bool do_cond_resched, retry;
+#ifdef LOG_TOO_MUCH_WARNING
+ unsigned long total_log_size = 0;
+ unsigned long long t1 = 0, t2 = 0;
+ char aee_str[512];
+ int org_loglevel = console_loglevel;
+#endif
+
if (console_suspended) {
up(&console_sem);
int level;
raw_spin_lock_irqsave(&logbuf_lock, flags);
+#ifdef LOG_TOO_MUCH_WARNING /* time console flushing during resume */
+ if (log_in_resume) {
+ t1 = sched_clock();
+ }
+#endif
+
if (seen_seq != log_next_seq) {
wake_klogd = true;
seen_seq = log_next_seq;
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, text, len);
- start_critical_timings();
+#ifdef LOG_TOO_MUCH_WARNING
+ /*
+ * A UART console needs ~10us per character, so draining
+ * 400,000 characters stalls the CPU for ~4 seconds.
+ */
+ if (log_in_resume) {
+ org_loglevel = console_loglevel;
+ console_loglevel = 4;
+ }
+ total_log_size += len;
+ if (total_log_size < console_log_max)
+ call_console_drivers(level, text, len);
+ else if (!already_skip_log) {
+ snprintf(aee_str, sizeof(aee_str), "PRINTK too much:%lu",
+ total_log_size);
+ aee_kernel_warning(aee_str, "Need to shrink kernel log");
+ already_skip_log = 1;
+ }
+ start_critical_timings();
+ /* flag resume paths where flushing the console took too long */
+ if (log_in_resume) {
+ t2 = sched_clock();
+ console_loglevel = org_loglevel;
+ if (t2 - t1 > 100000000) {
+ snprintf(aee_str, sizeof(aee_str),
+ "[RESUME CONSOLE too long:%lluns>100ms] s:%lluns, e:%lluns\n",
+ t2 - t1, t1, t2);
+ aee_kernel_warning(aee_str, "Need to shrink kernel log");
+ }
+ }
+
+#else
+ start_critical_timings();
+ call_console_drivers(level, text, len);
+#endif
local_irq_restore(flags);
if (do_cond_resched)
static DEFINE_PER_CPU(int, printk_pending);
static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+static DEFINE_PER_CPU(int, printk_sched_length);
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
if (pending & PRINTK_PENDING_SCHED) {
char *buf = __get_cpu_var(printk_sched_buf);
- printk(KERN_WARNING "[sched_delayed] %s", buf);
+ printk(KERN_WARNING "[printk_delayed:start]\n");
+ printk(KERN_WARNING "%s", buf);
+ printk(KERN_WARNING "[printk_delayed:done]\n");
+ __get_cpu_var(printk_sched_length) = 0;
}
if (pending & PRINTK_PENDING_WAKEUP)
va_list args;
char *buf;
int r;
-
+ int buf_length;
local_irq_save(flags);
buf = __get_cpu_var(printk_sched_buf);
+ buf_length = __get_cpu_var(printk_sched_length);
va_start(args, fmt);
- r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+ if (buf_length < PRINTK_BUF_SIZE) {
+ /* vscnprintf() returns the bytes actually stored, so the
+ * tracked length can never overrun the buffer */
+ r = vscnprintf(buf + buf_length, PRINTK_BUF_SIZE - buf_length, fmt, args);
+ __get_cpu_var(printk_sched_length) += r;
+ } else {
+ printk("delayed log buf overflow, size:%d\n", buf_length);
+ r = 0;
+ }
va_end(args);
__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
task_thread_info(current));
}
+void get_kernel_log_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
+{
+ *addr = (unsigned long)log_buf;
+ *size = log_buf_len;
+ *start = (unsigned long)&log_first_idx;
+}
#endif
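A hedged sketch of a consumer of get_kernel_log_buffer(); the recorder and
its save routine are hypothetical, only the exported helper above is real:

extern void get_kernel_log_buffer(unsigned long *addr, unsigned long *size,
				  unsigned long *start);

static void record_log_buf_for_postmortem(void)
{
	unsigned long addr, size, start;

	get_kernel_log_buffer(&addr, &size, &start);
	/*
	 * addr/size bound the ring buffer; start points at log_first_idx,
	 * so an offline dump tool can locate the oldest record.
	 */
	/* dump_header_save(addr, size, start);  -- hypothetical sink */
}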
#include "../workqueue_internal.h"
#include "../smpboot.h"
+#ifdef CONFIG_MT65XX_TRACER
+#include "mach/mt_mon.h"
+#include "linux/aee.h"
+#endif
+
+#include <linux/mt_sched_mon.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
+#include <mtlbprof/mtlbprof.h>
+#include <mtlbprof/mtlbprof_stat.h>
+
+#ifdef CONFIG_MT_PRIO_TRACER
+# include <linux/prio_tracer.h>
+#endif
+
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
unsigned long delta;
load->inv_weight = prio_to_wmult[prio];
}
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+static void sched_tg_enqueue(struct rq *rq, struct task_struct *p)
+{
+ int id;
+ unsigned long flags;
+ struct task_struct *tg = p->group_leader;
+
+ if (group_leader_is_empty(p))
+ return;
+ id = get_cluster_id(rq->cpu);
+ if (unlikely(WARN_ON(id < 0)))
+ return;
+
+ raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
+ tg->thread_group_info[id].nr_running++;
+ raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);
+
+#if 0
+ mt_sched_printf("enqueue %d:%s %d:%s %d %d %lu %lu %lu, %lu %lu %lu",
+ tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
+ tg->thread_group_info[0].nr_running,
+ tg->thread_group_info[0].cfs_nr_running,
+ tg->thread_group_info[0].load_avg_ratio,
+ tg->thread_group_info[1].nr_running,
+ tg->thread_group_info[1].cfs_nr_running,
+ tg->thread_group_info[1].load_avg_ratio);
+#endif
+}
+
+static void sched_tg_dequeue(struct rq *rq, struct task_struct *p)
+{
+ int id;
+ unsigned long flags;
+ struct task_struct *tg = p->group_leader;
+
+ if (group_leader_is_empty(p))
+ return;
+ id = get_cluster_id(rq->cpu);
+ if (unlikely(WARN_ON(id < 0)))
+ return;
+
+ raw_spin_lock_irqsave(&tg->thread_group_info_lock, flags);
+ tg->thread_group_info[id].nr_running--;
+ raw_spin_unlock_irqrestore(&tg->thread_group_info_lock, flags);
+
+#if 0
+ mt_sched_printf("dequeue %d:%s %d:%s %d %d %lu %lu %lu, %lu %lu %lu",
+ tg->pid, tg->comm, p->pid, p->comm, id, rq->cpu,
+ tg->thread_group_info[0].nr_running,
+ tg->thread_group_info[0].cfs_nr_running,
+ tg->thread_group_info[0].load_avg_ratio,
+ tg->thread_group_info[1].nr_running,
+ tg->thread_group_info[1].cfs_nr_running,
+ tg->thread_group_info[1].load_avg_ratio);
+#endif
+}
+
+#endif
+
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+static void tgs_log(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_MT_SCHED_INFO
+ struct task_struct *tg = p->group_leader;
+
+ if (group_leader_is_empty(p))
+ return;
+
+ mt_sched_printf("%d:%s %d:%s %lu %lu %lu, %lu %lu %lu",
+ tg->pid, tg->comm, p->pid, p->comm,
+ tg->thread_group_info[0].nr_running,
+ tg->thread_group_info[0].cfs_nr_running,
+ tg->thread_group_info[0].load_avg_ratio,
+ tg->thread_group_info[1].nr_running,
+ tg->thread_group_info[1].cfs_nr_running,
+ tg->thread_group_info[1].load_avg_ratio);
+#endif
+}
+#endif
+
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, flags);
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+ sched_tg_enqueue(rq, p);
+ tgs_log(rq, p);
+#endif
}
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
update_rq_clock(rq);
sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, flags);
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+ sched_tg_dequeue(rq, p);
+ tgs_log(rq, p);
+#endif
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
rq->nr_uninterruptible--;
enqueue_task(rq, p, flags);
+
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ if (rq->nr_running >= 2) {
+ if (cpumask_weight(&p->cpus_allowed) == 1)
+ mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_AFFINITY_STATE);
+ else
+ mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_N_TASK_STATE);
+ } else if (rq->nr_running == 1) {
+ mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
+ }
+#endif
}
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
rq->nr_uninterruptible++;
dequeue_task(rq, p, flags);
+
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ if (rq->nr_running == 1)
+ mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_ONE_TASK_STATE);
+ else if (rq->nr_running == 0)
+ mt_lbprof_update_state_has_lock(rq->cpu, MT_LBPROF_NO_TASK_STATE);
+#endif
}
static void update_rq_clock_task(struct rq *rq, s64 delta)
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = RTPM_PRIO_CPU_CALLBACK };
struct task_struct *old_stop = cpu_rq(cpu)->stop;
if (stop) {
raw_spin_unlock(&rq->lock);
}
-
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP,
+};
void scheduler_ipi(void)
{
if (llist_empty(&this_rq()->wake_list)
&& !tick_nohz_full_cpu(smp_processor_id())
- && !got_nohz_idle_kick())
+ && !got_nohz_idle_kick()) {
+ mt_trace_ISR_start(IPI_RESCHEDULE);
+ mt_trace_ISR_end(IPI_RESCHEDULE);
return;
+ }
/*
* Not all reschedule IPI handlers call irq_enter/irq_exit, since
* somewhat pessimize the simple resched case.
*/
irq_enter();
+ mt_trace_ISR_start(IPI_RESCHEDULE);
tick_nohz_full_check();
sched_ttwu_pending();
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
+ mt_trace_ISR_end(IPI_RESCHEDULE);
irq_exit();
}
cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
if (task_cpu(p) != cpu) {
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ char strings[128]="";
+#endif
wake_flags |= WF_MIGRATED;
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ snprintf(strings, sizeof(strings), "%d:%d:%s:wakeup:%d:%d:%s",
+ task_cpu(current), current->pid, current->comm,
+ cpu, p->pid, p->comm);
+ trace_sched_lbprof_log(strings);
+#endif
set_task_cpu(p, cpu);
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
p->se.avg.runnable_avg_period = 0;
p->se.avg.runnable_avg_sum = 0;
+#ifdef CONFIG_SCHED_HMP
+ /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
+#define LOAD_AVG_MAX 47742
+ if (p->mm) {
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+ p->se.avg.load_avg_ratio = 1023;
+ p->se.avg.load_avg_contrib =
+ (1023 * scale_load_down(p->se.load.weight));
+ p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
+ p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
+ p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
+ }
+#endif
#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif
+ /* Initialize new task's runnable average */
+ init_task_runnable_average(p);
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = 1;
prepare_task_switch(rq, prev, next);
+#ifdef CONFIG_MT65XX_TRACER
+ if (get_mt65xx_mon_mode() == MODE_SCHED_SWITCH)
+ trace_mt65xx_mon_sched_switch(prev, next);
+#endif
mm = next->mm;
oldmm = prev->active_mm;
/*
return this->cpu_load[0];
}
+unsigned long get_cpu_load(int cpu)
+{
+ struct rq *this = cpu_rq(cpu);
+ return this->cpu_load[0];
+}
+EXPORT_SYMBOL(get_cpu_load);
/*
* Global load-average calculations
sched_avg_update(this_rq);
}
+# ifdef CONFIG_SMP
+/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
+static inline unsigned long get_rq_runnable_load(struct rq *rq)
+{
+ return rq->cfs.runnable_load_avg;
+}
+# else
+static inline unsigned long get_rq_runnable_load(struct rq *rq)
+{
+ return rq->load.weight;
+}
+# endif
+
#ifdef CONFIG_NO_HZ_COMMON
/*
* There is no sane way to deal with nohz on smp when using jiffies because the
* Called from nohz_idle_balance() to update the load ratings before doing the
* idle balance.
*/
+/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
void update_idle_cpu_load(struct rq *this_rq)
{
unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
/*
* Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
*/
+/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
void update_cpu_load_nohz(void)
{
struct rq *this_rq = this_rq();
/*
* Called from scheduler_tick()
*/
+/* moved to kernel/sched/proc.c at Linux 3.11-rc4 */
static void update_cpu_load_active(struct rq *this_rq)
{
+ unsigned long load = get_rq_runnable_load(this_rq);
/*
* See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
*/
this_rq->last_load_update_tick = jiffies;
- __update_cpu_load(this_rq, this_rq->load.weight, 1);
+ __update_cpu_load(this_rq, load, 1);
calc_load_account_active(this_rq);
}
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
- update_cpu_load_active(rq);
curr->sched_class->task_tick(rq, curr, 0);
+ update_cpu_load_active(rq);
+#ifdef CONFIG_MT_RT_SCHED
+ mt_check_rt_policy(rq);
+#endif
raw_spin_unlock(&rq->lock);
perf_event_task_tick();
-
+#ifdef CONFIG_MT_SCHED_MONITOR
+ if (smp_processor_id() == 0) /* only record on CPU#0 */
+ mt_save_irq_counts();
+#endif
#ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu);
trigger_load_balance(rq, cpu);
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
- if (preempt_count() == val)
- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ if (preempt_count() == (val & ~PREEMPT_ACTIVE)) {
+#ifdef CONFIG_PREEMPT_TRACER
+ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+#endif
+#ifdef CONFIG_PREEMPT_MONITOR
+ if (unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1))
+ MT_trace_preempt_off();
+#endif
+ }
}
EXPORT_SYMBOL(add_preempt_count);
return;
#endif
- if (preempt_count() == val)
- trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ if (preempt_count() == (val & ~PREEMPT_ACTIVE)) {
+#ifdef CONFIG_PREEMPT_TRACER
+ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+#endif
+#ifdef CONFIG_PREEMPT_MONITOR
+ if (unlikely(__raw_get_cpu_var(mtsched_mon_enabled) & 0x1))
+ MT_trace_preempt_on();
+#endif
+ }
preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);
print_irqtrace_events(prev);
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ BUG_ON(1); /* make "scheduling while atomic" fatal */
}
/*
if (sched_feat(HRTICK))
hrtick_clear(rq);
+#ifdef CONFIG_MT_SCHED_MONITOR
+ __raw_get_cpu_var(MT_trace_in_sched) = 1;
+#endif
/*
* Make sure that signal_pending_state()->signal_pending() below
} else
raw_spin_unlock_irq(&rq->lock);
+#ifdef CONFIG_MT_RT_SCHED
+ mt_post_schedule(rq);
+#endif
+#ifdef CONFIG_MT_SCHED_MONITOR
+ __raw_get_cpu_var(MT_trace_in_sched) = 0;
+#endif
post_schedule(rq);
sched_preempt_enable_no_resched();
__task_rq_unlock(rq);
}
#endif
+
+#ifdef CONFIG_MT_PRIO_TRACER
+void set_user_nice_core(struct task_struct *p, long nice)
+{
+ int old_prio, delta, on_rq;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+ return;
+ /*
+ * We have to be careful, if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ rq = task_rq_lock(p, &flags);
+ /*
+ * The RT priorities are set via sched_setscheduler(), but we still
+ * allow the 'normal' nice value to be set - but as expected
+ * it wont have any effect on scheduling until the task is
+ * SCHED_FIFO/SCHED_RR:
+ */
+ if (task_has_rt_policy(p)) {
+ p->static_prio = NICE_TO_PRIO(nice);
+ goto out_unlock;
+ }
+ on_rq = p->on_rq;
+ if (on_rq)
+ dequeue_task(rq, p, 0);
+
+ p->static_prio = NICE_TO_PRIO(nice);
+ set_load_weight(p);
+ old_prio = p->prio;
+ p->prio = effective_prio(p);
+ delta = p->prio - old_prio;
+
+ if (on_rq) {
+ enqueue_task(rq, p, 0);
+ /*
+ * If the task increased its priority or is running and
+ * lowered its priority, then reschedule its CPU:
+ */
+ if (delta < 0 || (delta > 0 && task_running(rq, p)))
+ resched_task(rq->curr);
+ }
+out_unlock:
+ task_rq_unlock(rq, p, &flags);
+}
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+ set_user_nice_core(p, nice);
+ /* setting nice implies to set a normal sched policy */
+ update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_KRNL);
+}
+#else /* !CONFIG_MT_PRIO_TRACER */
void set_user_nice(struct task_struct *p, long nice)
{
int old_prio, delta, on_rq;
out_unlock:
task_rq_unlock(rq, p, &flags);
}
+#endif
EXPORT_SYMBOL(set_user_nice);
/*
retval = security_task_setnice(current, nice);
if (retval)
return retval;
-
+#ifdef CONFIG_MT_PRIO_TRACER
+ set_user_nice_syscall(current, nice);
+#else
set_user_nice(current, nice);
+#endif
return 0;
}
return pid ? find_task_by_vpid(pid) : current;
}
+extern struct cpumask hmp_slow_cpu_mask;
+
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
p->normal_prio = normal_prio(p);
/* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
set_load_weight(p);
return match;
}
+static int check_mt_allow_rt(struct sched_param *param)
+{
+ int allow = 0;
+
+ if (MT_ALLOW_RT_PRIO_BIT == 0) {
+ /* transitional: permit everything until the bit is defined */
+ return 1;
+ }
+
+ if (param->sched_priority & MT_ALLOW_RT_PRIO_BIT) {
+ param->sched_priority &= ~MT_ALLOW_RT_PRIO_BIT;
+ allow = 1;
+ }
+ return allow;
+}
+
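A hedged user-space sketch of the convention this enforces: a caller ORs
MT_ALLOW_RT_PRIO_BIT into sched_priority, and check_mt_allow_rt() strips it
again before the usual range checks. The bit value below is an assumption;
the real definition lives in a MediaTek header:

#include <sched.h>
#include <stdio.h>

#define MT_ALLOW_RT_PRIO_BIT 0x10000000	/* assumed value */

int main(void)
{
	struct sched_param param = {
		.sched_priority = 50 | MT_ALLOW_RT_PRIO_BIT,
	};

	/* on a kernel with this patch, the bit is masked off and the
	 * request proceeds as SCHED_FIFO priority 50 */
	if (sched_setscheduler(0, SCHED_FIFO, &param))
		perror("sched_setscheduler");
	return 0;
}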
static int __sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool user)
{
return -EINVAL;
}
+ if (rt_policy(policy)) {
+ if (!check_mt_allow_rt((struct sched_param *)param)) {
+ printk("[RT_MONITOR]WARNING [%d:%s] set disallowed RT prio [%d] for proc [%d:%s]\n",
+ current->pid, current->comm, param->sched_priority,
+ p->pid, p->comm);
+ }
+ }
+
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
*
* NOTE that the task may be already dead.
*/
+#ifdef CONFIG_MT_PRIO_TRACER
+int sched_setscheduler_core(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return __sched_setscheduler(p, policy, param, true);
+}
+
+int sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ int retval;
+
+ retval = sched_setscheduler_core(p, policy, param);
+ if (!retval) {
+ int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
+ if (!rt_policy(policy))
+ prio = __normal_prio(p);
+ else
+ prio = MAX_RT_PRIO-1 - prio;
+ update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
+ }
+ return retval;
+}
+#else /* !CONFIG_MT_PRIO_TRACER */
int sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, true);
}
+#endif
EXPORT_SYMBOL_GPL(sched_setscheduler);
/**
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
*/
+#ifdef CONFIG_MT_PRIO_TRACER
+int sched_setscheduler_nocheck_core(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return __sched_setscheduler(p, policy, param, false);
+}
+
+
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ int retval;
+
+ retval = sched_setscheduler_nocheck_core(p, policy, param);
+ if (!retval) {
+ int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
+ if (!rt_policy(policy))
+ prio = __normal_prio(p);
+ else
+ prio = MAX_RT_PRIO-1 - prio;
+ update_prio_tracer(task_pid_nr(p), prio, policy, PTS_KRNL);
+ }
+ return retval;
+}
+#else /* !CONFIG_MT_PRIO_TRACER */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, false);
}
+#endif
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
+#ifdef CONFIG_MT_PRIO_TRACER
+ if (p != NULL)
+ retval = sched_setscheduler_syscall(p, policy, &lparam);
+#else
if (p != NULL)
retval = sched_setscheduler(p, policy, &lparam);
+#endif
+
rcu_read_unlock();
return retval;
if (!p) {
rcu_read_unlock();
put_online_cpus();
+ printk(KERN_DEBUG "SCHED: setaffinity find process %d fail\n", pid);
return -ESRCH;
}
if (p->flags & PF_NO_SETAFFINITY) {
retval = -EINVAL;
+ printk(KERN_DEBUG "SCHED: setaffinity flags PF_NO_SETAFFINITY fail\n");
goto out_put_task;
}
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
+ printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for cpus_allowed fail\n");
goto out_put_task;
}
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
+ printk(KERN_DEBUG "SCHED: setaffinity allo_cpumask_var for new_mask fail\n");
goto out_free_cpus_allowed;
}
retval = -EPERM;
rcu_read_lock();
if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
rcu_read_unlock();
+ printk(KERN_DEBUG "SCHED: setaffinity check_same_owner and task_ns_capable fail\n");
goto out_unlock;
}
rcu_read_unlock();
}
retval = security_task_setscheduler(p);
- if (retval)
+ if (retval) {
+ printk(KERN_DEBUG "SCHED: setaffinity security_task_setscheduler fail, status: %d\n", retval);
goto out_unlock;
+ }
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
again:
retval = set_cpus_allowed_ptr(p, new_mask);
+ if (retval)
+ printk(KERN_DEBUG "SCHED: set_cpus_allowed_ptr status %d\n", retval);
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
out_put_task:
put_task_struct(p);
put_online_cpus();
+ if (retval)
+ printk(KERN_DEBUG "SCHED: setaffinity status %d\n", retval);
return retval;
}
retval = -ESRCH;
p = find_process_by_pid(pid);
- if (!p)
+ if (!p) {
+ printk(KERN_DEBUG "SCHED: getaffinity find process %d fail\n", pid);
goto out_unlock;
+ }
retval = security_task_getscheduler(p);
- if (retval)
+ if (retval) {
+ printk(KERN_DEBUG "SCHED: getaffinity security_task_getscheduler fail, status: %d\n", retval);
goto out_unlock;
+ }
raw_spin_lock_irqsave(&p->pi_lock, flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
rcu_read_unlock();
put_online_cpus();
+ if (retval)
+ printk(KERN_DEBUG "SCHED: getaffinity status %d\n", retval);
return retval;
}
}
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
-
+#ifdef CONFIG_MT_DEBUG_MUTEXES
+void mt_mutex_state(struct task_struct *p)
+{
+ struct task_struct *locker;
+
+ if (p->blocked_on) {
+ locker = p->blocked_on->task_wait_on;
+ if (find_task_by_vpid(locker->pid) != NULL) {
+ printk("Hint: wait on mutex, holder is [%d:%s:%ld]\n",
+ locker->pid, locker->comm, locker->state);
+ if (locker->state != TASK_RUNNING) {
+ printk("Mutex holder process[%d:%s] is not running now:\n",
+ locker->pid, locker->comm);
+ show_stack(locker, NULL);
+ printk("----\n");
+ }
+ } else {
+ printk("Hint: wait on mutex, but holder already released lock\n");
+ }
+ }
+}
+#endif
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
print_worker_info(KERN_INFO, p);
show_stack(p, NULL);
+#ifdef CONFIG_MT_DEBUG_MUTEXES
+ mt_mutex_state(p);
+#endif
}
void show_state_filter(unsigned long state_filter)
touch_all_softlockup_watchdogs();
#ifdef CONFIG_SCHED_DEBUG
- sysrq_sched_debug_show();
+ if (!state_filter)
+ sysrq_sched_debug_show();
#endif
rcu_read_unlock();
/*
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
+ printk(KERN_DEBUG "SCHED: intersects new_mask: %lu, cpu_active_mask: %lu\n", new_mask->bits[0], cpu_active_mask->bits[0]);
goto out;
}
* done here.
*/
rq->stop = NULL;
+ /* MTK patch: let throttled RT tasks still be migrated off a dead CPU */
+ unthrottle_offline_rt_rqs(rq);
for ( ; ; ) {
/*
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-
set_rq_online(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
rcu_assign_pointer(rq->sd, sd);
destroy_sched_domains(tmp, cpu);
+#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
+ update_packing_domain(cpu);
+#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
update_top_cache_domain(cpu);
}
return 0*SD_ASYM_PACKING;
}
+#if defined (CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK) || defined (CONFIG_HMP_PACK_SMALL_TASK)
+int __weak arch_sd_share_power_line(void)
+{
+ return 0*SD_SHARE_POWERLINE;
+}
+#endif /* CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK || CONFIG_HMP_PACK_SMALL_TASK */
/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
+#ifdef CONFIG_PROVE_LOCKING
+ rq->cpu = i;
+#endif
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
return (nested == preempt_offset);
}
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
sched_offline_group(tg);
}
+static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ tcred = __task_cred(task);
+
+ if ((current != task) && !capable(CAP_SYS_NICE) &&
+ !uid_eq(cred->euid, tcred->uid) && !uid_eq(cred->euid, tcred->suid))
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static int cpu_cgroup_can_attach(struct cgroup *cgrp,
struct cgroup_taskset *tset)
{
cgroup_taskset_for_each(task, cgrp, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
- return -EINVAL;
+ return -ERTGROUP;
#else
/* We don't support RT-tasks being in separate groups */
if (task->sched_class != &fair_sched_class)
.css_offline = cpu_cgroup_css_offline,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
+ .allow_attach = cpu_cgroup_allow_attach,
.exit = cpu_cgroup_exit,
.subsys_id = cpu_cgroup_subsys_id,
.base_cftypes = cpu_files,
pr_info("Task dump for CPU %d:\n", cpu);
sched_show_task(cpu_curr(cpu));
}
+
+unsigned long long mt_get_thread_cputime(pid_t pid)
+{
+ struct task_struct *p;
+
+ p = pid ? find_task_by_vpid(pid) : current;
+ return task_sched_runtime(p);
+}
+
+unsigned long long mt_get_cpu_idle(int cpu)
+{
+ unsigned long long *unused = NULL;
+
+ return get_cpu_idle_time_us(cpu, unused);
+}
+
+unsigned long long mt_sched_clock(void)
+{
+ return sched_clock();
+}
+EXPORT_SYMBOL(mt_get_thread_cputime);
+EXPORT_SYMBOL(mt_get_cpu_idle);
+EXPORT_SYMBOL(mt_sched_clock);
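A hedged module sketch exercising the three exported helpers above (the
module itself is hypothetical):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>

extern unsigned long long mt_get_thread_cputime(pid_t pid);
extern unsigned long long mt_get_cpu_idle(int cpu);
extern unsigned long long mt_sched_clock(void);

static int __init mt_stats_demo_init(void)
{
	/* pid 0 means "current task" per mt_get_thread_cputime() above */
	pr_info("cputime=%llu ns, cpu0 idle=%llu us, now=%llu ns\n",
		mt_get_thread_cputime(0), mt_get_cpu_idle(0),
		mt_sched_clock());
	return 0;
}
module_init(mt_stats_demo_init);
MODULE_LICENSE("GPL");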
extern unsigned int core_pipe_limit;
#endif
extern int pid_max;
+extern int extra_free_kbytes;
+extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
extern int compat_log;
.proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = &zero,
},
+ {
+ .procname = "extra_free_kbytes",
+ .data = &extra_free_kbytes,
+ .maxlen = sizeof(extra_free_kbytes),
+ .mode = 0644,
+ .proc_handler = min_free_kbytes_sysctl_handler,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "min_free_order_shift",
+ .data = &min_free_order_shift,
+ .maxlen = sizeof(min_free_order_shift),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
{
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
len = 0;
p = buffer;
while (len < *lenp) {
- if (get_user(c, p++))
+ if (get_user(c, p))
return -EFAULT;
if (c == 0 || c == '\n')
break;
+ p++;
len++;
}
if (len >= maxlen)
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
cond_resched();
find_page:
+ if (fatal_signal_pending(current)) {
+ error = -EINTR;
+ goto out;
+ }
+
page = find_get_page(mapping, index);
if (!page) {
page_cache_sync_readahead(mapping,
} else if (!page) {
/* No page in the page cache at all */
do_sync_mmap_readahead(vma, ra, file, offset);
+#ifdef CONFIG_ZRAM
+ current->fm_flt++;
+#endif
+ count_vm_event(PGFMFAULT);
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
#include "internal.h"
+#ifdef CONFIG_MTK_EXTMEM
+extern bool extmem_in_mspace(struct vm_area_struct *vma);
+extern unsigned long get_virt_from_mspace(unsigned long pa);
+#endif
+
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
#endif
return ERR_PTR(-EFAULT);
return page;
}
+EXPORT_SYMBOL_GPL(follow_page_mask);
- static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
- {
- return stack_guard_page_start(vma, addr) ||
- stack_guard_page_end(vma, addr+PAGE_SIZE);
- }
-
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
page_mask = 0;
goto next_page;
}
-
+ #ifdef CONFIG_MTK_EXTMEM
+ if (!vma || !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ /*
+ * Let VM_IO | VM_RESERVED | VM_PFNMAP pass (reserved
+ * physical memory mapped by PFN); reject anything else.
+ */
+ if (!((vma->vm_flags & VM_IO) && (vma->vm_flags & VM_RESERVED) &&
+ (vma->vm_flags & VM_PFNMAP)))
+ return i ? : -EFAULT;
+ }
+ #else
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
+ #endif
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
int ret;
unsigned int fault_flags = 0;
- /* For mlock, just skip the stack guard page. */
- if (foll_flags & FOLL_MLOCK) {
- if (stack_guard_page(vma, start))
- goto next_page;
- }
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
+#ifdef CONFIG_MTK_EXTMEM
+ if (addr == vma->vm_start && end == vma->vm_end) {
+ vma->vm_pgoff = pfn;
+ } else if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+#else
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
-
+#endif
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
return ret;
}
- /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
- static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
- {
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
- }
-
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGSEGV;
-
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+ if (!vma->vm_ops->fault)
+ return VM_FAULT_SIGBUS;
+
pte_unmap(page_table);
/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
if (!vma->vm_ops->fault)
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
+#ifdef CONFIG_MTK_EXTMEM
+ if (!write) {
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ break;
+ if (vma->vm_end < addr + len)
+ len = vma->vm_end - addr;
+ if (extmem_in_mspace(vma)) {
+ void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT)
+ + (addr - vma->vm_start);
+ memcpy(buf, extmem_va, len);
+ buf += len;
+ break;
+ }
+ }
+#endif
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
- new_pol);
+ new_pol, vma_get_anon_name(vma));
if (prev) {
vma = prev;
next = vma->vm_next;
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
compat_ulong_t mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
anon_vma_interval_tree_verify(avc);
vma_unlock_anon_vma(vma);
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
else if (next)
vma_gap_update(next);
else
- mm->highest_vm_end = end;
+ WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
if (insert && file)
uprobe_mmap(insert);
* per-vma resources, so we don't attempt to merge those.
*/
static inline int is_mergeable_vma(struct vm_area_struct *vma,
- struct file *file, unsigned long vm_flags)
+ struct file *file, unsigned long vm_flags,
+ const char __user *anon_name)
{
if (vma->vm_flags ^ vm_flags)
return 0;
return 0;
if (vma->vm_ops && vma->vm_ops->close)
return 0;
+ if (vma_get_anon_name(vma) != anon_name)
+ return 0;
return 1;
}
*/
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+ const char __user *anon_name)
{
- if (is_mergeable_vma(vma, file, vm_flags) &&
+ if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
return 1;
*/
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+ const char __user *anon_name)
{
- if (is_mergeable_vma(vma, file, vm_flags) &&
+ if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
/*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor. Or both (it neatly fills a hole).
*
* In most cases - when called for mmap, brk or mremap - [addr,end) is
* certain not to be mapped by the time vma_merge is called; but when
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
- pgoff_t pgoff, struct mempolicy *policy)
+ pgoff_t pgoff, struct mempolicy *policy,
+ const char __user *anon_name)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
struct vm_area_struct *area, *next;
*/
if (prev && prev->vm_end == addr &&
mpol_equal(vma_policy(prev), policy) &&
- can_vma_merge_after(prev, vm_flags,
- anon_vma, file, pgoff)) {
+ can_vma_merge_after(prev, vm_flags, anon_vma,
+ file, pgoff, anon_name)) {
/*
* OK, it can. Can we now merge in the successor as well?
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags,
- anon_vma, file, pgoff+pglen) &&
+ can_vma_merge_before(next, vm_flags, anon_vma,
+ file, pgoff+pglen, anon_name) &&
is_mergeable_anon_vma(prev->anon_vma,
next->anon_vma, NULL)) {
/* cases 1, 6 */
*/
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags,
- anon_vma, file, pgoff+pglen)) {
+ can_vma_merge_before(next, vm_flags, anon_vma,
+ file, pgoff+pglen, anon_name)) {
if (prev && addr < prev->vm_end) /* case 4 */
err = vma_adjust(prev, prev->vm_start,
addr, prev->vm_pgoff, NULL);
/*
* Can we just expand an old mapping?
*/
- vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+ vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+ NULL, NULL);
if (vma)
goto out;
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
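The extra "gap_end > gap_start" test guards the unsigned subtraction: with
guard-gap clamping, vm_end_gap() of the previous vma can exceed
vm_start_gap() of the current one, and "gap_end - gap_start" would wrap to a
huge bogus gap. A user-space demo with illustrative addresses:

#include <assert.h>

int main(void)
{
	unsigned long gap_start = 0x7ffffffff000UL; /* vm_end_gap(prev) */
	unsigned long gap_end = 0x7ffff0000000UL;   /* vm_start_gap(vma) */
	unsigned long length = 0x100000UL;

	/* old test alone: the subtraction wraps and looks "big enough" */
	assert(gap_end - gap_start >= length);
	/* new combined test: correctly rejected */
	assert(!(gap_end > gap_start && gap_end - gap_start >= length));
	return 0;
}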
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
- static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+ static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
*/
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
- int error;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
+ int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= TASK_SIZE)
+ return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- vma_lock_anon_vma(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
- * Also guard against wrapping around to address 0.
*/
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
- }
- error = 0;
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- vma->vm_mm->highest_vm_end = address;
+ vma->vm_mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&vma->vm_mm->page_table_lock);
perf_event_mmap(vma);
int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-
address &= PAGE_MASK;
error = security_mmap_addr(address);
if (error)
return error;
- vma_lock_anon_vma(vma);
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
return error;
}
- /*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+ /* enforced gap between the expanding stack and other mappings. */
+ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+ static int __init cmdline_parse_stack_guard_gap(char *p)
+ {
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+ }
+ __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
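The parser turns a page count into bytes by shifting with PAGE_SHIFT; a tiny
demo of the arithmetic (4 KiB pages assumed). Booting with stack_guard_gap=1
restores the old single-guard-page behaviour:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;	/* 4 KiB pages: an assumption */
	unsigned long pages = 256;	/* the default set above */

	printf("default gap: %lu bytes (%lu MiB)\n",
	       pages << page_shift, (pages << page_shift) >> 20);
	printf("stack_guard_gap=1 -> %lu bytes\n", 1UL << page_shift);
	return 0;
}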
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
if (mm->unmap_area == arch_unmap_area)
addr = prev ? prev->vm_end : mm->mmap_base;
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
+#ifdef CONFIG_MTK_EXTMEM
+extern bool extmem_in_mspace(struct vm_area_struct *vma);
+extern void *get_virt_from_mspace(unsigned long pa);
+extern size_t extmem_get_mem_size(unsigned long pgoff);
+extern void extmem_free(void* mem);
+#endif
+
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
unsigned long end;
+ struct file *file;
struct vm_area_struct *vma, *prev, *last;
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
vma = find_vma(mm, start);
if (!vma)
return 0;
+ file = vma->vm_file;
+ if (file) {
+ const char *name = file->f_path.dentry->d_iname;
+
+ if (name && (strstr(name, "app_process") ||
+ strstr(name, "app_process64") ||
+ strstr(name, "main") || strstr(name, "Binder_")))
+ printk("name:%s unmap vm_start %lx end: %lx\n",
+ name, vma->vm_start, vma->vm_end);
+ } else {
+ const char *name = arch_vma_name(vma);
+
+ if (name && (strstr(name, "app_process") ||
+ strstr(name, "app_process64") ||
+ strstr(name, "main") || strstr(name, "Binder_")))
+ printk("name:%s unmap vm_start %lx end: %lx\n",
+ name, vma->vm_start, vma->vm_end);
+ }
prev = vma->vm_prev;
/* we have start < vma->vm_end */
+#ifdef CONFIG_MTK_EXTMEM
+ /* get correct mmap size if in mspace. */
+ if (extmem_in_mspace(vma))
+ len = extmem_get_mem_size(vma->vm_pgoff);
+#endif
+
/* if it doesn't overlap, we have nothing.. */
end = start + len;
if (vma->vm_start >= end)
/* Can we just expand an old private anonymous mapping? */
vma = vma_merge(mm, prev, addr, addr + len, flags,
- NULL, NULL, pgoff, NULL);
+ NULL, NULL, pgoff, NULL, NULL);
if (vma)
goto out;
if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
return NULL; /* should never get here */
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
- vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+ vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+ vma_get_anon_name(vma));
if (new_vma) {
/*
* Source vma may have been merged into new_vma
colon = strchr(devname, ':');
if (colon)
*colon = 0;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog info] ip_rt_ioctl rt.dev=%s\n", devname);
+ #endif
dev = __dev_get_by_name(net, devname);
if (!dev)
return -ENODEV;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog info] ip_rt_ioctl rt.rt_flags=%08x rt.rt_dst=%08x rt.rt_gateway=%08x\n", rt.rt_flags, sk_extract_addr(&(rt.rt_dst)), sk_extract_addr(&(rt.rt_gateway)));
+ #endif
rtnl_lock();
err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
struct fib_table *tb;
if (cmd == SIOCDELRT) {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog delete] ip_rt_ioctl: cmd == SIOCDELRT\n");
+ #endif
tb = fib_get_table(net, cfg.fc_table);
if (tb)
err = fib_table_delete(tb, &cfg);
else
err = -ESRCH;
} else {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog insert] ip_rt_ioctl: cmd == SIOCADDRT\n");
+ #endif
tb = fib_new_table(net, cfg.fc_table);
if (tb)
err = fib_table_insert(tb, &cfg);
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
+ [RTA_UID] = { .type = NLA_U32 },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
struct fib_config cfg;
struct fib_table *tb;
int err;
-
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog delete] inet_rtm_delroute\n");
+ #endif
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
-
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog info] inet_rtm_delroute cfg.fc_dst=%08x, cfg.fc_gw=%08x\n", cfg.fc_dst, cfg.fc_gw);
+ #endif
tb = fib_get_table(net, cfg.fc_table);
if (tb == NULL) {
err = -ESRCH;
struct fib_config cfg;
struct fib_table *tb;
int err;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog insert] inet_rtm_newroute\n");
+ #endif
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog info] inet_rtm_newroute cfg.fc_dst=%08x, cfg.fc_gw=%08x\n", cfg.fc_dst, cfg.fc_gw);
+ #endif
tb = fib_new_table(net, cfg.fc_table);
if (tb == NULL) {
if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
+ /* MTK_NET_CHANGES: skip adding subnet routes on ccmni (cellular) interfaces */
+ if (strncmp(dev->name, "ccmni", 5) == 0) {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog] ignore ccmni subnet route\n");
+ #endif
+ } else {
fib_magic(RTM_NEWROUTE,
dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
prefix, ifa->ifa_prefixlen, prim);
-
+ }
/* Add network specific broadcasts, when it takes a sense */
if (ifa->ifa_prefixlen < 31) {
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+ skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
switch (event) {
case NETDEV_UP:
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog insert] fib_inetaddr_event() %s NETDEV_UP!\n", ifa->ifa_dev->dev->name);
+ #endif
fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev);
rt_cache_flush(dev_net(dev));
break;
case NETDEV_DOWN:
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog delete] fib_inetaddr_event() %s NETDEV_DOWN!\n", ifa->ifa_dev->dev->name);
+ #endif
fib_del_ifaddr(ifa, NULL);
atomic_inc(&net->ipv4.dev_addr_genid);
if (ifa->ifa_dev->ifa_list == NULL) {
switch (event) {
case NETDEV_UP:
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog insert] fib_netdev_event() %s NETDEV_UP!\n", dev->name);
+ #endif
for_ifa(in_dev) {
fib_add_ifaddr(ifa);
} endfor_ifa(in_dev);
rt_cache_flush(net);
break;
case NETDEV_DOWN:
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][RTlog delete] fib_netdev_event() %s NETDEV_DOWN!\n", dev->name);
+ #endif
fib_disable_ip(dev, 0);
break;
case NETDEV_CHANGEMTU:
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
u32 mss = tcp_sk(sk)->advmss;
- u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ u32 icwnd = sysctl_tcp_default_init_rwnd;
int rcvmem;
/* Limit to 10 segments if mss <= 1460,
* or 14600/mss segments, with a minimum of two segments.
*/
if (mss > 1460)
- icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < mss)
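Both tcp_fixup_rcvbuf() above and tcp_select_initial_window() further down apply the same scaling: the configured initial window counts full 1460-byte segments, so a larger MSS gets proportionally fewer segments. Extracted as a standalone illustration (this helper is hypothetical, not part of the patch):

    /* Illustration: initial-window scaling shared by both call sites. */
    static u32 scaled_init_rwnd(u32 mss, u32 default_rwnd)
    {
            if (mss > 1460)
                    return max_t(u32, (1460 * default_rwnd) / mss, 2);
            return default_rwnd;
    }
    /* With default_rwnd = 10: mss 1460 -> 10, mss 2920 -> 5, mss 9000 -> 2 */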
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk1 = inet_csk(sk); /* writable alias; icsk above is const */
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
bool new_recovery = false;
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
+ if (icsk->icsk_MMSRB == 1)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n", tp->snd_cwnd, tp->snd_cwnd_cnt);
+ #endif
+ /* rcv_wnd based estimate; note it is unconditionally overwritten
+ * by the ssthresh based value just below. */
+ if (tp->mss_cache != 0)
+ tp->snd_cwnd = (tp->rcv_wnd / tp->mss_cache);
+ else
+ tp->snd_cwnd = (tp->rcv_wnd / tp->advmss);
+
+ /* Set snd_cwnd to half of snd_ssthresh, padding small windows. */
+ if (tp->snd_ssthresh > 16)
+ tp->snd_cwnd = tp->snd_ssthresh / 2;
+ else
+ tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
+ #endif
+ icsk1->icsk_MMSRB = 0;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
+ #endif
+ }
+ else
+ {
 tp->snd_cwnd = 1;
+ }
+
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
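The ssthresh-derived restart window above works out as follows (illustration only):

    /* snd_ssthresh -> snd_cwnd on the MMSRB path:
     *   64 -> 32   (ssthresh / 2)
     *   20 -> 10
     *   10 -> 9    (ssthresh / 2 + 4, small-window padding)
     *    2 -> 5
     * The normal path keeps the standard snd_cwnd = 1 of tcp_enter_loss().
     */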
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
return true;
}
return false;
return false;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rto = delta;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
*/
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
}
}
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
+ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
if (skb != NULL) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
discard:
__kfree_skb(skb);
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
-
+ printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
- struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
- u32 mtu = tcp_sk(sk)->mtu_info;
+ struct dst_entry *dst;
+ u32 mtu;
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+ mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
+ remaining, sysctl_tcp_rto_max);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
* because it's been added to the accept queue directly.
*/
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
/* Add the child socket directly into the accept queue */
inet_csk_reqsk_queue_add(sk, req, child);
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
return true;
}
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_init_sock(sk);
+ icsk->icsk_MMSRB = 0;
icsk->icsk_af_ops = &ipv4_specific;
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
+void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+ struct inet_connection_sock *icsk = NULL; /* set per matching socket in the loop below */
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if (sk->sk_socket) {
+ if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ printk(KERN_INFO "[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!\n",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else {
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+ /* Update this socket's retransmit timer and RTO. */
+ icsk = inet_csk(sk);
+ printk(KERN_INFO "[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
+
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
+ icsk->icsk_rto = sysctl_tcp_rto_min * 30;
+ icsk->icsk_MMSRB = 1;
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ spin_lock_bh(lock);
+ sock_put(sk);
+
+ }
+ spin_unlock_bh(lock);
+ }
+
+}
+
+
+/*
+ * tcp_v4_reset_connections_by_uid - destroy all sockets of a specific uid
+ */
+void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if (sk->sk_socket) {
+ if (SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!\n",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else {
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = uid_e.errNum;
+ printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+}
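struct uid_err is declared elsewhere in this patch and is not shown in this excerpt; only the two fields referenced above are assumed. A hypothetical caller, e.g. from the SIOCKILLSOCK ioctl path named in the log strings:

    /* Hypothetical usage sketch -- the ioctl plumbing is not shown here. */
    struct uid_err e;

    e.appuid = 10077;          /* Android app UID whose sockets to reset */
    e.errNum = ECONNABORTED;   /* delivered to each socket via sk->sk_err */
    tcp_v4_reset_connections_by_uid(e);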
+
+
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
* the idea of fast retransmit in recovery.
*/
if (!inet_rtx_syn_ack(sk, req))
- req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
- TCP_RTO_MAX) + jiffies;
+ req->expires = min_t(unsigned int, TCP_TIMEOUT_INIT << req->num_timeout,
+ sysctl_tcp_rto_max) + jiffies;
return NULL;
}
}
/* Set initial window to a value enough for senders starting with
- * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * initial congestion window of sysctl_tcp_default_init_rwnd. Place
* a limit on the initial window when mss is larger than 1460.
*/
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ int init_cwnd = sysctl_tcp_default_init_rwnd;
if (mss > 1460)
- init_cwnd =
- max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rearm_timer:
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
int window;
- if (mss > full_space)
+ if (unlikely(mss > full_space)) {
mss = full_space;
-
+ if (mss <= 0)
+ return 0;
+ }
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
/* Timer for repeating the SYN until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
return 0;
}
EXPORT_SYMBOL(tcp_connect);
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
return;
}
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+int sysctl_tcp_rto_min __read_mostly = TCP_RTO_MIN;
+EXPORT_SYMBOL(sysctl_tcp_rto_min);
+int sysctl_tcp_rto_max __read_mostly = TCP_RTO_MAX;
+EXPORT_SYMBOL(sysctl_tcp_rto_max);
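The sysctl registration that makes these knobs visible as net.ipv4.tcp_rto_{min,max} is not part of this excerpt; the entries in net/ipv4/sysctl_net_ipv4.c would plausibly look like the following sketch (handler choice assumed):

    /* Sketch only -- assumed ipv4_table entries; values are in jiffies. */
    {
            .procname     = "tcp_rto_min",
            .data         = &sysctl_tcp_rto_min,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
    },
    {
            .procname     = "tcp_rto_max",
            .data         = &sysctl_tcp_rto_max,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
    },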
static void tcp_write_err(struct sock *sk)
{
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+ if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*sysctl_tcp_rto_max || !do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
bool syn_set)
{
unsigned int linear_backoff_thresh, start_ts;
- unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+ unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : sysctl_tcp_rto_min;
if (!inet_csk(sk)->icsk_retransmits)
return false;
start_ts = tcp_sk(sk)->retrans_stamp;
if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ linear_backoff_thresh = ilog2(sysctl_tcp_rto_max/rto_base);
if (boundary <= linear_backoff_thresh)
timeout = ((2 << boundary) - 1) * rto_base;
else
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ (boundary - linear_backoff_thresh) * sysctl_tcp_rto_max;
}
return (tcp_time_stamp - start_ts) >= timeout;
}
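With the stock defaults (TCP_RTO_MIN = 200 ms, sysctl_tcp_rto_max = 120 s, so linear_backoff_thresh = ilog2(600) = 9), the timeout budget works out as follows (illustration only):

    /* boundary -> timeout, rto_base = 200 ms:
     *    3 -> ((2 << 3) - 1) * 200 ms                  =   3 s
     *    9 -> ((2 << 9) - 1) * 200 ms                  = 204.6 s
     *   15 -> 204.6 s + (15 - 9) * sysctl_tcp_rto_max  = 924.6 s
     */

Lowering tcp_rto_max shrinks both the exponential and the linear part of the budget, which is presumably why the vendor patch makes it tunable.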
retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
+ const int alive = (icsk->icsk_rto < sysctl_tcp_rto_max);
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
sk_mem_reclaim_partial(sk);
- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out;
if (time_after(icsk->icsk_ack.timeout, jiffies)) {
max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
+ const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < sysctl_tcp_rto_max);
max_probes = tcp_orphan_retries(sk, alive);
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT << req->num_timeout, sysctl_tcp_rto_max);
}
/*
tp->snd_una, tp->snd_nxt);
}
#endif
- if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
+ if (tcp_time_stamp - tp->rcv_tstamp > sysctl_tcp_rto_max) {
tcp_write_err(sk);
goto out;
}
NET_INC_STATS_BH(sock_net(sk), mib_idx);
}
+ if (icsk->icsk_MMSRB == 1)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_DEBUG "[mtk_net][mmspb] tcp_retransmit_timer enter loss\n");
+ #endif
+ }
tcp_enter_loss(sk, 0);
if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
goto out;
}
tcp_stream_is_thin(tp) &&
icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
icsk->icsk_backoff = 0;
- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ icsk->icsk_rto = min_t(unsigned int, __tcp_set_rto(tp), sysctl_tcp_rto_max);
} else {
/* Use normal (exponential) backoff */
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ icsk->icsk_rto = min_t(unsigned int, icsk->icsk_rto << 1, sysctl_tcp_rto_max);
}
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, sysctl_tcp_rto_max);
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ !icsk->icsk_pending)
goto out;
-
+ /* MMSRB: skip the not-yet-expired check so the freshly rearmed
+ * timer is handled immediately. */
+ if (icsk->icsk_MMSRB != 1)
+ {
if (time_after(icsk->icsk_timeout, jiffies)) {
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
goto out;
}
-
+ }
event = icsk->icsk_pending;
switch (event) {
if (!sock_owned_by_user(sk)) {
tcp_write_timer_handler(sk);
} else {
+
/* deleguate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
sock_hold(sk);
static void tcp_synack_timer(struct sock *sk)
{
inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
}
void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
.forwarding = 0,
.hop_limit = IPV6_DEFAULT_HOPLIMIT,
.mtu6 = IPV6_MIN_MTU,
- .accept_ra = 1,
+ .accept_ra = 1,
.accept_redirects = 1,
.autoconf = 1,
.force_mld_version = 0,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
- .use_tempaddr = 0,
+ .use_tempaddr = 1,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ .ra_info_flag = 0,
+#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
- .use_tempaddr = 0,
+ .use_tempaddr = 1,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_pinfo = 1,
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ .ra_info_flag = 0,
+#endif
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
} else if ((!newf) ^ (!old))
dev_forward_change((struct inet6_dev *)table->extra1);
rtnl_unlock();
-
+
if (newf)
rt6_purge_dflt_routers(net);
+
return 1;
}
#endif
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
+ /* MTK_NET_CHANGES: no EUI-64 autoconf addresses on ccmni (cellular) interfaces */
+ if (strncmp(dev->name, "ccmni", 5) == 0)
+ return -1;
+
switch (dev->type) {
case ARPHRD_ETHER:
case ARPHRD_FDDI:
}
#endif
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+ /* Determines into what table to put autoconf PIO/RIO/default routes
+ * learned on this device.
+ *
+ * - If 0, use the same table for every device. This puts routes into
+ * one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+ * (but note that these three are currently all equal to
+ * RT6_TABLE_MAIN).
+ * - If > 0, use the specified table.
+ * - If < 0, put routes into table dev->ifindex + (-rt_table).
+ */
+ struct inet6_dev *idev = in6_dev_get(dev);
+ u32 table;
+ int sysctl;
+
+ if (!idev) /* defensive: the device may not have an inet6_dev yet */
+ return default_table;
+ sysctl = idev->cnf.accept_ra_rt_table;
+ if (sysctl == 0)
+ table = default_table;
+ else if (sysctl > 0)
+ table = (u32) sysctl;
+ else
+ table = (u32) dev->ifindex + (u32) (-sysctl);
+ in6_dev_put(idev);
+ return table;
+}
+
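A quick worked example of the table selection (illustration only, for a device with ifindex 3):

    /* accept_ra_rt_table = 0     -> default_table (RT6_TABLE_MAIN here)
     * accept_ra_rt_table = 100   -> table 100, shared by all devices
     * accept_ra_rt_table = -1000 -> table 1003 (ifindex 3 + 1000),
     *                               i.e. one RA route table per device
     */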
/*
* Add prefix route.
*/
unsigned long expires, u32 flags)
{
struct fib6_config cfg = {
- .fc_table = RT6_TABLE_PREFIX,
+ .fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
.fc_metric = IP6_RT_PRIO_ADDRCONF,
.fc_ifindex = dev->ifindex,
.fc_expires = expires,
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+ table = fib6_get_table(dev_net(dev),
+ addrconf_rt_table(dev, RT6_TABLE_PREFIX));
if (table == NULL)
return NULL;
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
#endif
#endif
+ array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ array[DEVCONF_RA_INFO_FLAG] = cnf->ra_info_flag;
+#endif
}
static inline size_t inet6_ifla6_size(void)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
dev_disable_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
},
#endif
#endif
+ {
+ .procname = "accept_ra_rt_table",
+ .data = &ipv6_devconf.accept_ra_rt_table,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{
.procname = "proxy_ndp",
.data = &ipv6_devconf.proxy_ndp,
.mode = 0644,
.proc_handler = proc_dointvec
},
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ {
+ .procname = "ra_info_flag",
+ .data = &ipv6_devconf.ra_info_flag,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
{
/* sentinel */
}
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
+ fl6.flowi6_uid = sock_i_uid(sk);
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
- amount = skb->tail - skb->transport_header;
+ amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
struct sk_buff *skb);
#ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
- unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+ const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex);
+ const struct in6_addr *gwaddr);
#endif
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
const struct in6_addr *gwaddr)
{
- struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
unsigned int pref;
if (rinfo->prefix_len == 0)
rt = rt6_get_dflt_router(gwaddr, dev);
else
- rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
- gwaddr, dev->ifindex);
+ rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
if (rt && !lifetime) {
ip6_del_rt(rt);
}
if (!rt && lifetime)
- rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
- pref);
+ rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
- int oif, u32 mark)
+ int oif, u32 mark, kuid_t uid)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
- fl6.flowi6_mark = mark;
+ fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
fl6.flowi6_flags = 0;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
+ fl6.flowi6_uid = uid;
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark);
+ sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
+ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+ continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
}
#ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex)
+ const struct in6_addr *gwaddr)
{
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(net, RT6_TABLE_INFO);
+ table = fib6_get_table(dev_net(dev),
+ addrconf_rt_table(dev, RT6_TABLE_INFO));
if (!table)
return NULL;
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
- if (rt->dst.dev->ifindex != ifindex)
+ if (rt->dst.dev->ifindex != dev->ifindex)
continue;
if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
continue;
return rt;
}
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
- unsigned int pref)
+ const struct in6_addr *gwaddr, unsigned int pref)
{
struct fib6_config cfg = {
- .fc_table = RT6_TABLE_INFO,
+ .fc_table = addrconf_rt_table(dev, RT6_TABLE_INFO),
.fc_metric = IP6_RT_PRIO_USER,
- .fc_ifindex = ifindex,
+ .fc_ifindex = dev->ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
- .fc_nlinfo.nl_net = net,
+ .fc_nlinfo.nl_net = dev_net(dev),
};
cfg.fc_dst = *prefix;
ip6_route_add(&cfg);
- return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+ return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
}
#endif
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+ table = fib6_get_table(dev_net(dev),
+ addrconf_rt_table(dev, RT6_TABLE_MAIN));
if (!table)
return NULL;
unsigned int pref)
{
struct fib6_config cfg = {
- .fc_table = RT6_TABLE_DFLT,
+ .fc_table = addrconf_rt_table(dev, RT6_TABLE_DFLT),
.fc_metric = IP6_RT_PRIO_USER,
.fc_ifindex = dev->ifindex,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(struct net *net)
-{
- struct rt6_info *rt;
- struct fib6_table *table;
- /* NOTE: Keep consistent with rt6_get_dflt_router */
- table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (!table)
- return;
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg)
+{
+ /* A non-zero return tells fib6_clean_all() to delete this route. */
+ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+ (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+ return -1;
+ return 0;
+}
-restart:
- read_lock_bh(&table->tb6_lock);
- for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
- (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
- dst_hold(&rt->dst);
- read_unlock_bh(&table->tb6_lock);
- ip6_del_rt(rt);
- goto restart;
- }
- }
- read_unlock_bh(&table->tb6_lock);
+void rt6_purge_dflt_routers(struct net *net)
+{
+ fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
}
static void rtmsg_to_fib6_config(struct net *net,
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
+ [RTA_UID] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);
+ if (tb[RTA_UID])
+ fl6.flowi6_uid = make_kuid(current_user_ns(),
+ nla_get_u32(tb[RTA_UID]));
+ else
+ fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
+
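Only the request-parsing side is shown above; the dump side that reports the UID back to userspace is not in this excerpt, but would plausibly be the mirror image (a sketch, assuming the standard kuid helpers):

    /* Sketch only -- matching RTA_UID output when building the reply. */
    if (uid_valid(fl6.flowi6_uid) &&
        nla_put_u32(skb, RTA_UID,
                    from_kuid_munged(current_user_ns(), fl6.flowi6_uid)))
            goto nla_put_failure;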
if (iif) {
struct net_device *dev;
int flags = 0;
int ret;
int chk_addr_ret;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr_len < sizeof(struct sockaddr_l2tpip))
return -EINVAL;
if (addr->l2tp_family != AF_INET)
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
drop:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
- return -1;
+ return 0;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/freezer.h>
+
+#include <linux/uio.h>
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/rtc.h>
+#include <asm/kmap_types.h>
+#include <linux/device.h>
+
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
+
+/* for AEE interface: unix socket tracking */
+#define __UNIX_SOCKET_OUTPUT_BUF_SIZE__ 3500
+static struct proc_dir_entry *gunix_socket_track_aee_entry = NULL;
+#define UNIX_SOCK_TRACK_AEE_PROCNAME "driver/usktrk_aee"
+#define UNIX_SOCK_TRACK_PROC_AEE_SIZE 3072
+
+static volatile unsigned int unix_sock_track_stop_flag = 0;
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
return hash&(UNIX_HASH_SIZE-1);
}
-#define unix_peer(sk) (unix_sk(sk)->peer)
+
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
- printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk(KERN_INFO "[mtk_net][unix]Attempt to release alive unix socket: %p\n", sk);
+ #endif
return;
}
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
-#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ #ifdef UNIX_REFCNT_DEBUG
+ printk(KERN_DEBUG "[mtk_net][unix]UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
-#endif
+ #endif
}
static void unix_release_sock(struct sock *sk, int embrion)
unix_state_unlock(sk);
put_pid(old_pid);
out:
+
return err;
}
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
+ struct path path = { NULL, NULL };
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
goto out;
addr_len = err;
+ if (sun_path[0]) {
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+ /* Create the filesystem node before taking u->readlock so the
+ * VFS locks taken by unix_mknod() nest outside it. */
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ goto out;
+ }
+ }
+
err = mutex_lock_interruptible(&u->readlock);
if (err)
- goto out;
+ goto out_put;
err = -EINVAL;
if (u->addr)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
-
- umode_t mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(sun_path, mode, &path);
- if (err) {
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
- }
addr->hash = UNIX_HASH_SIZE;
hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
spin_lock(&unix_table_lock);
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->readlock);
+ out_put:
+ if (err)
+ path_put(&path);
out:
+
return err;
}
int err;
if (addr->sa_family != AF_UNSPEC) {
+
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
+
+#ifdef CONFIG_MTK_NET_LOGGING
+ if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_dgram_connect[%lu]: connect [%s] other[%lu]\n", SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
+ }
+#endif
+
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
+
return err;
}
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
+
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if ((SOCK_INODE(sock) != NULL) && (sunaddr != NULL) && (other->sk_socket != NULL) && (SOCK_INODE(other->sk_socket) != NULL))
+ {
+ printk(KERN_INFO "[mtk_net][socket]unix_stream_connect[%lu]: connect [%s] other[%lu]\n", SOCK_INODE(sock)->i_ino, sunaddr->sun_path, SOCK_INODE(other->sk_socket)->i_ino);
+ }
+ #endif
+
other->sk_data_ready(other, 0);
sock_put(other);
+
return 0;
out_unlock:
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
+
return err;
}
/* If socket state is TCP_LISTEN it cannot change (for now...),
* so that no locks are necessary.
*/
-
+
skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
if (!skb) {
/* This means receive shutdown. */
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
+
return 0;
out:
+
return err;
}
int max_level;
int data_len = 0;
int sk_locked;
- <<<<<<< HEAD
-
- =======
- >>>>>>> v3.10.95
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
sock_put(other);
if (!sk_locked)
- <<<<<<< HEAD
- unix_state_lock(sk);
- =======
unix_state_lock(sk);
err = 0;
- >>>>>>> v3.10.95
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
goto out_unlock;
}
- <<<<<<< HEAD
- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
- =======
- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
- >>>>>>> v3.10.95
+ /* other == sk && unix_peer(other) != sk if
+ * - unix_peer(sk) == NULL, destination address bound to sk
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
+
return len;
out_unlock:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
+
return err;
}
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
+
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
&err);
+
if (skb == NULL)
goto out_err;
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if (other->sk_socket) {
+ if (sk->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg[%lu:%lu]: peer close\n", SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg[null:%lu]: peer close\n", SOCK_INODE(other->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: sendmsg: peer close\n");
+ }
+ #endif
 goto pipe_err_free;
+ }
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
out_err:
scm_destroy(siocb->scm);
siocb->scm = NULL;
+
return sent ? : err;
}
out_unlock:
mutex_unlock(&u->readlock);
out:
+
return err;
}
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
unix_state_unlock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
int err = 0;
long timeo;
int skip;
+ struct sock *other = unix_peer(sk);
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if (sk->sk_socket) {
+ if (other && other->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to peer shutdown\n", SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to peer shutdown\n", SOCK_INODE(sk->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to peer shutdown\n");
+ }
+ #endif
 goto unlock;
-
+ }
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last);
+ if (!timeo)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ if (sk->sk_socket) {
+ if (other && other->sk_socket)
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:%lu]: exit read due to timeout\n", SOCK_INODE(sk->sk_socket)->i_ino, SOCK_INODE(other->sk_socket)->i_ino);
+ else
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg[%lu:null]: exit read due to timeout\n", SOCK_INODE(sk->sk_socket)->i_ino);
+ } else {
+ printk(KERN_INFO "[mtk_net][unix]: recvmsg: exit read due to timeout\n");
+ }
+ #endif
+ }
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
+
return copied ? : err;
}
mask |= POLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
+ {
+
return mask;
- }
+ }
+ }
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
+ {
return mask;
+ }
writable = unix_writable(sk);
if (writable) {
new_profile = aa_get_profile(ns->unconfined);
info = "ux fallback";
} else {
- error = -ENOENT;
+ error = -EACCES;
info = "profile not found";
}
}
* There is no exception for unconfined as change_hat is not
* available.
*/
- if (current->no_new_privs)
+ if (task_no_new_privs(current))
return -EPERM;
/* released below */
* no_new_privs is set because this aways results in a reduction
* of permissions.
*/
- if (current->no_new_privs && !unconfined(profile)) {
+ if (task_no_new_privs(current) && !unconfined(profile)) {
put_cred(cred);
return -EPERM;
}