VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 75
+ SUBLEVEL = 76
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -fno-delete-null-pointer-checks
+ -fno-delete-null-pointer-checks \
+ -w
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift));
}
- dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ /*
+ * FIXME: this deadlocks if port->lock is already held
+ * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ */
}
}
{
struct dw8250_data *d = p->private_data;
- if (offset == UART_MCR)
- d->last_mcr = value;
+ if (offset == UART_LCR)
+ d->last_lcr = value;
- offset <<= p->regshift;
- writel(value, p->membase + offset);
+ writel(value, p->membase + (offset << p->regshift));
+
+ /* Make sure LCR write wasn't ignored */
+ if (offset == UART_LCR) {
+ int tries = 1000;
+ while (tries--) {
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
+ return;
+ dw8250_force_idle(p);
+ writel(value, p->membase + (UART_LCR << p->regshift));
+ }
+ /*
+ * FIXME: this deadlocks if port->lock is already held
+ * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ */
+ }
}
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
{
- unsigned int value = readl(p->membase + (offset << p->regshift));
+ offset <<= p->regshift;
- return dw8250_modify_msr(p, offset, value);
+ return readl(p->membase + offset);
}
static int dw8250_handle_irq(struct uart_port *p)
{
+ struct dw8250_data *d = p->private_data;
unsigned int iir = p->serial_in(p, UART_IIR);
if (serial8250_handle_irq(p, iir)) {
return 1;
} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
- /* Clear the USR */
+ /* Clear the USR and write the LCR again. */
(void)p->serial_in(p, DW_UART_USR);
+ p->serial_out(p, UART_LCR, d->last_lcr);
return 1;
}
#include <linux/magic.h>
#include <linux/slab.h>
-#define DEBUGFS_DEFAULT_MODE 0700
+#define DEBUGFS_DEFAULT_MODE 0755
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
parent = dentry;
down:
mutex_lock(&parent->d_inode->i_mutex);
- list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_child) {
if (!debugfs_positive(child))
continue;
mutex_lock(&parent->d_inode->i_mutex);
if (child != dentry) {
- next = list_entry(child->d_u.d_child.next, struct dentry,
- d_u.d_child);
+ next = list_entry(child->d_child.next, struct dentry,
+ d_child);
goto up;
}
#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
}
#endif
+extern void kvfree(const void *addr);
+
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+ #define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
- #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_HWPOISON_LARGE)
+ #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *);
+ struct mempolicy *, const char __user *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
unsigned long shrink_slab(struct shrink_control *shrink,
unsigned long nr_pages_scanned,
unsigned long lru_pages);
+void drop_pagecache(void);
#ifndef CONFIG_MMU
#define randomize_va_space 0
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- list_del_init(&dentry->d_u.d_child);
+ list_del_init(&dentry->d_child);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
remove_dir(dentry);
return retval;
}
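+/*
+ * cgroup_allow_attach - called when the default permission check fails;
+ * gives every subsystem bound to the hierarchy a chance to approve the
+ * attach.  Returns 0 only if each subsystem implements ->allow_attach()
+ * and approves the taskset; a subsystem without the hook yields -EACCES,
+ * and any hook's error is propagated as-is.
+ */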
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ struct cgroup_subsys *ss;
+ int ret;
+
+ for_each_subsys(cgrp->root, ss) {
+ if (ss->allow_attach) {
+ ret = ss->allow_attach(cgrp, tset);
+ if (ret)
+ return ret;
+ } else {
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
/*
* Find the task_struct of the task to attach by vpid and pass it along to the
* function to attach either it or all tasks in its threadgroup. Will lock
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid)) {
- rcu_read_unlock();
- ret = -EACCES;
- goto out_unlock_cgroup;
+ /*
+ * if the default permission check fails, give each
+ * cgroup a chance to extend the permission check
+ */
+ struct cgroup_taskset tset = { };
+ tset.single.task = tsk;
+ tset.single.cgrp = cgrp;
+ ret = cgroup_allow_attach(cgrp, &tset);
+ if (ret) {
+ rcu_read_unlock();
+ goto out_unlock_cgroup;
+ }
}
} else
tsk = current;
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
+
#include <linux/sched/rt.h>
#include "trace.h"
#include "trace_output.h"
+#ifdef CONFIG_MTK_SCHED_TRACERS
+#include <linux/mtk_ftrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mtk_events.h>
+EXPORT_TRACEPOINT_SYMBOL(gpu_freq);
+#endif
+
+#ifdef CONFIG_MTK_EXTMEM
+#include <linux/vmalloc.h>
+#endif
+
/*
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
+#ifdef CONFIG_MTK_SCHED_TRACERS
+#define CPUX_TRACE_BUF_SIZE_DEFAULT 4194304UL
+#define CPU0_to_CPUX_RATIO (1.2)
+extern unsigned int get_max_DRAM_size(void);
+static unsigned long trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO);
+static unsigned long trace_buf_size_cpuX = CPUX_TRACE_BUF_SIZE_DEFAULT;
+static unsigned int trace_buf_size_updated_from_cmdline = 0;
+#endif
+
/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;
#endif
/* trace_flags holds trace_options default values */
+#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |
+ TRACE_ITER_FUNCTION;
+#else
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
+#endif
void tracer_tracing_on(struct trace_array *tr)
{
void tracing_on(void)
{
tracer_tracing_on(&global_trace);
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ trace_tracing_on(1, CALLER_ADDR0);
+#endif
}
EXPORT_SYMBOL_GPL(tracing_on);
*/
void tracing_off(void)
{
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ trace_tracing_on(0, CALLER_ADDR0);
+#endif
tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ trace_buf_size_cpu0 =
+ trace_buf_size_cpuX = buf_size;
+ trace_buf_size_updated_from_cmdline = 1;
+#endif
return 1;
}
__setup("trace_buf_size=", set_buf_size);
"irq-info",
"markers",
"function-trace",
+ "print-tgid",
NULL
};
synchronize_sched();
ring_buffer_reset_cpu(buffer, cpu);
+ printk(KERN_INFO "[ftrace]cpu %d trace reset\n", cpu);
ring_buffer_record_enable(buffer);
}
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
+ printk(KERN_INFO "[ftrace]all cpu trace reset\n");
ring_buffer_record_enable(buffer);
}
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
+#ifdef CONFIG_MTK_EXTMEM
+extern void* extmem_malloc_page_align(size_t bytes);
+#define SIZEOF_MAP_PID_TO_CMDLINE ((PID_MAX_DEFAULT+1)*sizeof(unsigned))
+#define SIZEOF_MAP_CMDLINE_TO_PID (SAVED_CMDLINES*sizeof(unsigned))
+static unsigned* map_pid_to_cmdline = NULL;
+static unsigned* map_cmdline_to_pid = NULL;
+#else
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
+#endif
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
+static unsigned saved_tgids[SAVED_CMDLINES];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static void trace_init_cmdlines(void)
{
+#ifdef CONFIG_MTK_EXTMEM
+ map_pid_to_cmdline = (unsigned *) extmem_malloc_page_align(SIZEOF_MAP_PID_TO_CMDLINE);
+ if(map_pid_to_cmdline == NULL) {
+ pr_err("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
+ map_pid_to_cmdline = (unsigned *)vmalloc(SIZEOF_MAP_PID_TO_CMDLINE);
+ }
+ map_cmdline_to_pid = (unsigned *) extmem_malloc_page_align(SIZEOF_MAP_CMDLINE_TO_PID);
+ if(map_cmdline_to_pid == NULL) {
+ pr_warning("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
+ map_cmdline_to_pid = (unsigned *)vmalloc(SIZEOF_MAP_CMDLINE_TO_PID);
+ }
+ memset(map_pid_to_cmdline, NO_CMDLINE_MAP, SIZEOF_MAP_PID_TO_CMDLINE);
+ memset(map_cmdline_to_pid, NO_CMDLINE_MAP, SIZEOF_MAP_CMDLINE_TO_PID);
+#else
memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
+#endif
cmdline_idx = 0;
}
{
struct ring_buffer *buffer;
unsigned long flags;
+ int reset_ftrace = 0;
if (tracing_disabled)
return;
/* Someone screwed up their debugging */
WARN_ON_ONCE(1);
global_trace.stop_count = 0;
+ reset_ftrace = 1;
}
goto out;
- }
+ } else
+ reset_ftrace = 1;
+
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
out:
raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ /* reset the ring buffer once all readers have left */
+ if(reset_ftrace == 1 && global_trace.stop_count == 0)
+ tracing_reset_online_cpus(&global_trace.trace_buffer);
+#endif
}
static void tracing_start_tr(struct trace_array *tr)
}
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+ saved_tgids[idx] = tsk->tgid;
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
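+/*
+ * trace_find_tgid - look up the thread-group id recorded for @pid in the
+ * saved cmdline table; returns -1 if no cmdline entry exists for that pid.
+ */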
+int trace_find_tgid(int pid)
+{
+ unsigned map;
+ int tgid;
+
+ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+ map = map_pid_to_cmdline[pid];
+ if (map != NO_CMDLINE_MAP)
+ tgid = saved_tgids[map];
+ else
+ tgid = -1;
+
+ arch_spin_unlock(&trace_cmdline_lock);
+ preempt_enable();
+
+ return tgid;
+}
+
void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
pr_info("ftrace: Allocated trace_printk buffers\n");
/* Expand the buffers to set size */
- tracing_update_buffers();
+ /* M: avoid expanding the buffer just because trace_printk() is used in the kernel */
+ /* tracing_update_buffers(); */
buffers_allocated = 1;
get_total_entries(buf, &total, &entries);
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
entries, total, num_online_cpus());
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ print_enabled_events(m);
+#endif
seq_puts(m, "#\n");
}
seq_puts(m, "# | | | | |\n");
}
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+ print_event_info(buf, m);
+ seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | | | |\n");
+}
+
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
print_event_info(buf, m);
seq_puts(m, "# | | | |||| | |\n");
}
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+ print_event_info(buf, m);
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | | |||| | |\n");
+}
+
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
- print_func_help_header_irq(iter->trace_buffer, m);
+ if (trace_flags & TRACE_ITER_TGID)
+ print_func_help_header_irq_tgid(iter->trace_buffer, m);
+ else
+ print_func_help_header_irq(iter->trace_buffer, m);
else
- print_func_help_header(iter->trace_buffer, m);
+ if (trace_flags & TRACE_ITER_TGID)
+ print_func_help_header_tgid(iter->trace_buffer, m);
+ else
+ print_func_help_header(iter->trace_buffer, m);
}
}
}
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
+ printk(KERN_INFO "[ftrace]end reading trace file\n");
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
}
if (file->f_mode & FMODE_READ) {
+ printk(KERN_INFO "[ftrace]start reading trace file\n");
iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
}
static const struct file_operations tracing_saved_cmdlines_fops = {
- .open = tracing_open_generic,
- .read = tracing_saved_cmdlines_read,
- .llseek = generic_file_llseek,
+ .open = tracing_open_generic,
+ .read = tracing_saved_cmdlines_read,
+ .llseek = generic_file_llseek,
+};
+
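+/*
+ * Read handler for the saved_tgids file: dumps every recorded pid together
+ * with its thread-group id, one "pid tgid" pair per line.
+ */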
+static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char *file_buf;
+ char *buf;
+ int len = 0;
+ int pid;
+ int i;
+
+ file_buf = kmalloc(SAVED_CMDLINES*(16+1+16), GFP_KERNEL);
+ if (!file_buf)
+ return -ENOMEM;
+
+ buf = file_buf;
+
+ for (i = 0; i < SAVED_CMDLINES; i++) {
+ int tgid;
+ int r;
+
+ pid = map_cmdline_to_pid[i];
+ if (pid == -1 || pid == NO_CMDLINE_MAP)
+ continue;
+
+ tgid = trace_find_tgid(pid);
+ r = sprintf(buf, "%d %d\n", pid, tgid);
+ buf += r;
+ len += r;
+ }
+
+ len = simple_read_from_buffer(ubuf, cnt, ppos,
+ file_buf, len);
+
+ kfree(file_buf);
+
+ return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_saved_tgids_read,
+ .llseek = generic_file_llseek,
};
static ssize_t
return ret;
}
-static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu_id)
{
int ret = size;
return ret;
}
-
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
*
int tracing_update_buffers(void)
{
int ret = 0;
+#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
+ int i = 0;
+#endif
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
+#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
+ {
+ if(get_max_DRAM_size() >= 0x40000000 && !trace_buf_size_updated_from_cmdline){
+ trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO * 1.25);
+ trace_buf_size_cpuX = (CPUX_TRACE_BUF_SIZE_DEFAULT * 1.25);
+ }
+
+ for_each_tracing_cpu(i){
+ ret = __tracing_resize_ring_buffer(&global_trace, (i==0?trace_buf_size_cpu0:trace_buf_size_cpuX), i);
+ if(ret < 0){
+ printk("KERN_INFO [ftrace]fail to update cpu%d ring buffer to %lu KB \n",
+ i, (i==0?(trace_buf_size_cpu0>>10):(trace_buf_size_cpuX>>10)));
+ break;
+ }
+ }
+ }
+#else
ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
RING_BUFFER_ALL_CPUS);
+#endif
mutex_unlock(&trace_types_lock);
return ret;
for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
buf[i] = 0;
+ printk(KERN_INFO "[ftrace]set current_tracer to '%s'\n", buf);
err = tracing_set_tracer(buf);
if (err)
return err;
struct inode *inode = file_inode(filp);
struct trace_array *tr = inode->i_private;
unsigned long val;
+ int do_drop_cache = 0;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
/* value is in KB */
val <<= 10;
+resize_ring_buffer:
ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
- if (ret < 0)
+ if (ret == -ENOMEM && !do_drop_cache) {
+ do_drop_cache++;
+ drop_pagecache();
+ goto resize_ring_buffer;
+ } else if (ret < 0)
return ret;
*ppos += cnt;
return ret;
if (buffer) {
+ if(ring_buffer_record_is_on(buffer) ^ val)
+ printk(KERN_INFO "[ftrace]tracing_on is toggled to %lu\n", val);
mutex_lock(&trace_types_lock);
if (val) {
tracer_tracing_on(tr);
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ trace_tracing_on(val, CALLER_ADDR0);
+#endif
if (tr->current_trace->start)
tr->current_trace->start(tr);
} else {
+#ifdef CONFIG_MTK_SCHED_TRACERS
+ trace_tracing_on(val, CALLER_ADDR0);
+#endif
tracer_tracing_off(tr);
if (tr->current_trace->stop)
tr->current_trace->stop(tr);
.llseek = default_llseek,
};
+#ifdef CONFIG_MTK_KERNEL_MARKER
+static int mt_kernel_marker_enabled = 1;
+static ssize_t
+mt_kernel_marker_enabled_simple_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%d\n", mt_kernel_marker_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+static ssize_t
+mt_kernel_marker_enabled_simple_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ mt_kernel_marker_enabled = !!val;
+
+ (*ppos)++;
+
+ return cnt;
+}
+static const struct file_operations kernel_marker_simple_fops = {
+ .open = tracing_open_generic,
+ .read = mt_kernel_marker_enabled_simple_read,
+ .write = mt_kernel_marker_enabled_simple_write,
+ .llseek = default_llseek,
+};
+#endif
struct dentry *trace_instance_dir;
static void
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
int ret;
/* Paranoid: Make sure the parent is the "instances" directory */
- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
if (WARN_ON_ONCE(parent != trace_instance_dir))
return -ENOENT;
trace_create_file("trace_marker", 0220, d_tracer,
tr, &tracing_mark_fops);
+ trace_create_file("saved_tgids", 0444, d_tracer,
+ tr, &tracing_saved_tgids_fops);
+
trace_create_file("trace_clock", 0644, d_tracer, tr,
&trace_clock_fops);
/* Only allocate trace_printk buffers if a trace_printk exists */
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
/* Must be called before global_trace.buffer is allocated */
- trace_printk_init_buffers();
+ trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
if (ring_buffer_expanded)
#include "trace_output.h"
+#include <linux/mtk_ftrace.h>
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"
int ret = 0;
int disable;
+ if(call->name && ((file->flags & FTRACE_EVENT_FL_ENABLED) ^ enable))
+ printk(KERN_INFO "[ftrace]event '%s' is %s\n", call->name, enable?"enabled":"disabled");
+
switch (enable) {
case 0:
/*
if (dir) {
spin_lock(&dir->d_lock); /* probably unneeded */
- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ list_for_each_entry(child, &dir->d_subdirs, d_child) {
if (child->d_inode) /* probably unneeded */
child->d_inode->i_private = NULL;
}
}
__setup("trace_event=", setup_trace_event);
+#ifdef CONFIG_MTK_SCHED_TRACERS
+// collect boot time ftrace, disabled by default
+static int boot_time_ftrace = 0;
+
+static __init int setup_boot_time_ftrace(char *str)
+{
+ boot_time_ftrace = 1;
+ return 1;
+}
+__setup("boot_time_ftrace", setup_boot_time_ftrace);
+
+#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
+
+// delay the ring buffer expansion until the late_initcall stage
+// to avoid impacting the boot time
+static __init int expand_ring_buffer_init(void){
+ if(!boot_time_ftrace)
+ tracing_update_buffers();
+ return 0;
+}
+late_initcall(expand_ring_buffer_init);
+
+#endif /* CONFIG_MTK_FTRACE_DEFAULT_ENABLE */
+#endif /* CONFIG_MTK_SCHED_TRACERS */
+
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
if (ret)
return ret;
+#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
+ // enable ftrace facilities
+ mt_ftrace_enable_disable(1);
+
+ // only expand the buffer earlier if we want to collect boot-time ftrace,
+ // to avoid the boot time being impacted by an early-expanded ring buffer
+ if(boot_time_ftrace)
+ tracing_update_buffers();
+ else
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+ printk(KERN_INFO "[ftrace]ftrace ready...\n");
+#endif
+
ret = register_module_notifier(&trace_module_nb);
if (ret)
pr_warning("Failed to register trace events module notifier\n");
else
ret = VM_FAULT_WRITE;
put_page(page);
- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
/*
* We must loop because handle_mm_fault() may back out if there's
* any difficulty e.g. if pte accessed bit gets updated concurrently.
static int ksm_scan_thread(void *nothing)
{
set_freezable();
- set_user_nice(current, 5);
+ // M: set KSMD's priority to the lowest value
+ set_user_nice(current, 19);
+ //set_user_nice(current, 5);
while (!kthread_should_stop()) {
mutex_lock(&ksm_thread_mutex);
#include "internal.h"
+#ifdef CONFIG_MTK_EXTMEM
+extern bool extmem_in_mspace(struct vm_area_struct *vma);
+extern unsigned long get_virt_from_mspace(unsigned long pa);
+#endif
+
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
#endif
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
/**
* follow_page_mask - look up a page descriptor from a user-virtual address
* @vma: vm_area_struct mapping @address
}
if ((flags & FOLL_NUMA) && pte_numa(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte))
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
goto unlock;
page = vm_normal_page(vma, address, pte);
return ERR_PTR(-EFAULT);
return page;
}
+EXPORT_SYMBOL_GPL(follow_page_mask);
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
page_mask = 0;
goto next_page;
}
-
+ #ifdef CONFIG_MTK_EXTMEM
+ if (!vma || !(vm_flags & vma->vm_flags))
+ {
+ return i ? : -EFAULT;
+ }
+
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ {
+ /* Allow VMAs that have VM_IO | VM_RESERVED | VM_PFNMAP all set (reserved physical-memory PFN mappings). */
+ if (!((vma->vm_flags & VM_IO) && (vma->vm_flags & VM_RESERVED) && (vma->vm_flags & VM_PFNMAP)))
+ return i ? : -EFAULT;
+ }
+ #else
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
+ #endif
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
else
return -EFAULT;
}
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS |
+ VM_FAULT_SIGSEGV))
return i ? i : -EFAULT;
BUG();
}
*/
if ((ret & VM_FAULT_WRITE) &&
!(vma->vm_flags & VM_WRITE))
- foll_flags &= ~FOLL_WRITE;
+ foll_flags |= FOLL_COW;
cond_resched();
}
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return -EHWPOISON;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
* un-COW'ed pages by matching them up with "vma->vm_pgoff".
* See vm_normal_page() for details.
*/
+#ifdef CONFIG_MTK_EXTMEM
+ if (addr == vma->vm_start && end == vma->vm_end) {
+ vma->vm_pgoff = pfn;
+ } else if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+#else
if (is_cow_mapping(vma->vm_flags)) {
if (addr != vma->vm_start || end != vma->vm_end)
return -EINVAL;
vma->vm_pgoff = pfn;
}
-
+#endif
err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
if (err)
return -EINVAL;
pte_unmap(page_table);
+ /* File mapping without ->vm_ops ? */
+ if (vma->vm_flags & VM_SHARED)
+ return VM_FAULT_SIGBUS;
+
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+ if (!vma->vm_ops->fault)
+ return VM_FAULT_SIGBUS;
+
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
- if (likely(vma->vm_ops->fault))
- return do_linear_fault(mm, vma, address,
- pte, pmd, flags, entry);
+ return do_linear_fault(mm, vma, address,
+ pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
ret = get_user_pages(tsk, mm, addr, 1,
write, 1, &page, &vma);
if (ret <= 0) {
+#ifdef CONFIG_MTK_EXTMEM
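+ /*
+ * When get_user_pages() fails on a read whose address lies inside an
+ * MTK extmem VMA, fall back to copying through the region's
+ * kernel-side mapping.
+ */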
+ if (!write) {
+ vma = find_vma(mm, addr);
+ if (!vma || vma->vm_start > addr)
+ break;
+ if (vma->vm_end < addr + len)
+ len = vma->vm_end - addr;
+ if (extmem_in_mspace(vma)) {
+ void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start);
+ memcpy(buf, extmem_va, len);
+ buf += len;
+ break;
+ }
+ }
+#endif
/*
* Check if this is a VM_IO | VM_PFNMAP VMA, which
* we can access using slightly different code.
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
+#include <linux/reciprocal_div.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
u32 mss = tcp_sk(sk)->advmss;
- u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ u32 icwnd = sysctl_tcp_default_init_rwnd;
int rcvmem;
/* Limit to 10 segments if mss <= 1460,
* or 14600/mss segments, with a minimum of two segments.
*/
if (mss > 1460)
- icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < mss)
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk1 = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
bool new_recovery = false;
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
+ if (icsk->icsk_MMSRB == 1)
+ {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n", tp->snd_cwnd, tp->snd_cwnd_cnt);
+ #endif
+ if (tp->mss_cache != 0)
+ tp->snd_cwnd = (tp->rcv_wnd / tp->mss_cache);
+ else
+ {
+ tp->snd_cwnd = (tp->rcv_wnd / tp->advmss);
+ }
+
+ if (tp->snd_ssthresh > 16)
+ {
+ tp->snd_cwnd = tp->snd_ssthresh / 2; /* set snd_cwnd to half of snd_ssthresh */
+ }
+ else
+ {
+ tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
+ }
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
+ #endif
+ icsk1->icsk_MMSRB = 0;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
+ #endif
+ }
+ else
+ {
tp->snd_cwnd = 1;
+ }
+
+ //tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
return true;
}
return false;
return false;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rto = delta;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
if (seq_rtt < 0) {
seq_rtt = ca_seq_rtt;
}
- if (!(sacked & TCPCB_SACKED_ACKED))
+ if (!(sacked & TCPCB_SACKED_ACKED)) {
reord = min(pkts_acked, reord);
- if (!after(scb->end_seq, tp->high_seq))
- flag |= FLAG_ORIG_SACK_ACKED;
+ if (!after(scb->end_seq, tp->high_seq))
+ flag |= FLAG_ORIG_SACK_ACKED;
+ }
}
if (sacked & TCPCB_SACKED_ACKED)
*/
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
}
}
static u32 challenge_timestamp;
static unsigned int challenge_count;
u32 now = jiffies / HZ;
+ u32 count;
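+ /*
+ * Start each one-second window with a randomized challenge-ACK budget
+ * (roughly between limit/2 and 3*limit/2) so the remaining count is
+ * not predictable by an off-path observer.
+ */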
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ ACCESS_ONCE(challenge_count) = half +
+ reciprocal_divide(prandom_u32(),
+ sysctl_tcp_challenge_ack_limit);
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = ACCESS_ONCE(challenge_count);
+ if (count > 0) {
+ ACCESS_ONCE(challenge_count) = count - 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
discard:
__kfree_skb(skb);
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
-
+ printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
+ remaining, sysctl_tcp_rto_max);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
* because it's been added to the accept queue directly.
*/
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
/* Add the child socket directly into the accept queue */
inet_csk_reqsk_queue_add(sk, req, child);
ireq->rmt_addr = saddr;
ireq->no_srccheck = inet_sk(sk)->transparent;
ireq->opt = tcp_v4_save_options(skb);
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
return true;
}
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_init_sock(sk);
+ icsk->icsk_MMSRB = 0;
icsk->icsk_af_ops = &ipv4_specific;
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
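+/*
+ * tcp_v4_handle_retrans_time_by_uid - walk the established hash and, for
+ * every socket owned by uid_e.appuid, rearm the retransmit timer to fire
+ * almost immediately, set icsk_rto to 30 times the minimum RTO, and mark
+ * the socket with icsk_MMSRB so that tcp_enter_loss() rescales snd_cwnd
+ * instead of collapsing it to 1.
+ */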
+void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+ struct inet_connection_sock *icsk = NULL;//inet_csk(sk);
+
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if(sk->sk_socket){
+ if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ else
+ printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else{
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+ // update sk time out value
+ icsk = inet_csk(sk);
+ printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
+
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
+ icsk->icsk_rto = sysctl_tcp_rto_min * 30;
+ icsk->icsk_MMSRB = 1;
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ spin_lock_bh(lock);
+ sock_put(sk);
+
+ }
+ spin_unlock_bh(lock);
+ }
+
+}
+
+
+/*
+ * tcp_v4_reset_connections_by_uid - destroy all sockets owned by a given uid
+ */
+void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
+{
+ unsigned int bucket;
+ uid_t skuid = (uid_t)(uid_e.appuid);
+
+ for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
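+/*
+ * Rescan the whole bucket after each kill: entries may have been
+ * unlinked by tcp_done() while the lock was dropped.
+ */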
+restart:
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+
+ if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+ continue;
+ if (sock_flag(sk, SOCK_DEAD))
+ continue;
+
+ if(sk->sk_socket){
+ if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
+ continue;
+ else
+ printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
+ SOCK_INODE(sk->sk_socket)->i_uid);
+ } else{
+ continue;
+ }
+
+ sock_hold(sk);
+ spin_unlock_bh(lock);
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ sk->sk_err = uid_e.errNum;
+ printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
+ sk->sk_error_report(sk);
+
+ tcp_done(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ sock_put(sk);
+
+ goto restart;
+ }
+ spin_unlock_bh(lock);
+ }
+}
+
+
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
}
/* Set initial window to a value enough for senders starting with
- * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * initial congestion window of sysctl_tcp_default_init_rwnd. Place
* a limit on the initial window when mss is larger than 1460.
*/
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ int init_cwnd = sysctl_tcp_default_init_rwnd;
if (mss > 1460)
- init_cwnd =
- max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rearm_timer:
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
}
#endif
+ /* Do not fool tcpdump (if any), clean our debris */
+ skb->tstamp.tv64 = 0;
return skb;
}
EXPORT_SYMBOL(tcp_make_synack);
/* Timer for repeating the SYN until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
return 0;
}
EXPORT_SYMBOL(tcp_connect);
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
return;
}
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
/* Only set hop_limit on the interface if it is higher than
* the current hop_limit.
*/
- if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit)
+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
- else
+ } else {
ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ }
if (rt)
dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
}
}
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ if (in6_dev->if_flags & IF_RA_OTHERCONF){
+ printk(KERN_INFO "[mtk_net][ipv6]receive RA with o bit!\n");
+ in6_dev->cnf.ra_info_flag = 1;
+ }
+ if(in6_dev->if_flags & IF_RA_MANAGED){
+ printk(KERN_INFO "[mtk_net][ipv6]receive RA with m bit!\n");
+ in6_dev->cnf.ra_info_flag = 2;
+ }
+ if(in6_dev->cnf.ra_info_flag == 0){
+ printk(KERN_INFO "[mtk_net][ipv6]receive RA neither O nor M bit is set!\n");
+ in6_dev->cnf.ra_info_flag = 4;
+ }
+#endif
+
if (ndopts.nd_useropts) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_useropts;
p;
p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) {
ndisc_ra_useropt(skb, p);
+#ifdef CONFIG_MTK_DHCPV6C_WIFI
+ /* only clear ra_info_flag when O bit is set */
+ if (p->nd_opt_type == ND_OPT_RDNSS &&
+ in6_dev->if_flags & IF_RA_OTHERCONF) {
+ printk(KERN_INFO "[mtk_net][ipv6]RDNSS, ignore RA with o bit!\n");
+ in6_dev->cnf.ra_info_flag = 0;
+ }
+#endif
}
}
}
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
-
+
switch (msg->icmph.icmp6_type) {
case NDISC_NEIGHBOUR_SOLICITATION:
ndisc_recv_ns(skb);
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
+ fl6.flowi6_uid = sock_i_uid(sk);
final_p = fl6_update_dst(&fl6, np->opt, &final);
if (err)
goto late_failure;
+ printk(KERN_INFO "net_sock, IPV6 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
fl6.flowi6_proto = IPPROTO_TCP;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = inet6_iif(skb);
+ fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
TCP_ECN_create_request(req, skb, sock_net(sk));
treq->iif = sk->sk_bound_dev_if;
+ inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);