Merge tag 'v3.10.76' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:42:30 +0000 (22:42 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:42:30 +0000 (22:42 +0100)
This is the 3.10.76 stable release

14 files changed:
Makefile
drivers/tty/serial/8250/8250_dw.c
fs/debugfs/inode.c
include/linux/mm.h
kernel/cgroup.c
kernel/trace/trace.c
kernel/trace/trace_events.c
mm/ksm.c
mm/memory.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/ndisc.c
net/ipv6/tcp_ipv6.c

diff --combined Makefile
index 1b1e26b25f425d11008b88ad246cf7594ebb9ff9,019a6a4b386d7771a0f235c68e659247bb5da590..4d8461f80d5c35e18425e979e5df75bf8267ab44
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 75
+ SUBLEVEL = 76
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -373,9 -373,7 +373,9 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -fno-strict-aliasing -fno-common \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
 -                 -fno-delete-null-pointer-checks
 +                 -fno-delete-null-pointer-checks \
 +                 -w
 +
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
  KBUILD_AFLAGS   := -D__ASSEMBLY__
diff --combined drivers/tty/serial/8250/8250_dw.c
index 196c26b038579f8123148137840e8f407b1761a9,86281fa5dcc327424e3ca9a3115e8032874c6147..9f3666c233b328951fbffc3d72105cce94627025
@@@ -98,7 -98,10 +98,10 @@@ static void dw8250_serial_out(struct ua
                        dw8250_force_idle(p);
                        writeb(value, p->membase + (UART_LCR << p->regshift));
                }
-               dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+               /*
+                * FIXME: this deadlocks if port->lock is already held
+                * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+                */
        }
  }
  
@@@ -113,31 -116,44 +116,46 @@@ static void dw8250_serial_out32(struct 
  {
        struct dw8250_data *d = p->private_data;
  
 -      if (offset == UART_MCR)
 -              d->last_mcr = value;
 +      if (offset == UART_LCR)
 +              d->last_lcr = value;
  
-       offset <<= p->regshift;
-       writel(value, p->membase + offset);
+       writel(value, p->membase + (offset << p->regshift));
+       /* Make sure LCR write wasn't ignored */
+       if (offset == UART_LCR) {
+               int tries = 1000;
+               while (tries--) {
+                       unsigned int lcr = p->serial_in(p, UART_LCR);
+                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
+                               return;
+                       dw8250_force_idle(p);
+                       writel(value, p->membase + (UART_LCR << p->regshift));
+               }
+               /*
+                * FIXME: this deadlocks if port->lock is already held
+                * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+                */
+       }
  }
  
  static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
  {
 -      unsigned int value = readl(p->membase + (offset << p->regshift));
 +      offset <<= p->regshift;
  
 -      return dw8250_modify_msr(p, offset, value);
 +      return readl(p->membase + offset);
  }
  
  static int dw8250_handle_irq(struct uart_port *p)
  {
 +      struct dw8250_data *d = p->private_data;
        unsigned int iir = p->serial_in(p, UART_IIR);
  
        if (serial8250_handle_irq(p, iir)) {
                return 1;
        } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
 -              /* Clear the USR */
 +              /* Clear the USR and write the LCR again. */
                (void)p->serial_in(p, DW_UART_USR);
 +              p->serial_out(p, UART_LCR, d->last_lcr);
  
                return 1;
        }
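
Note on the 8250_dw.c hunks above: the DesignWare UART can ignore an LCR write while the controller is busy, so the driver now writes, reads back, and retries up to 1000 times with a forced idle in between; the comparison masks UART_LCR_SPAR, and the dev_err() stays commented out because printing there can deadlock when port->lock is already held. A minimal userspace sketch of the same bounded write-verify pattern, with a plain variable standing in for the memory-mapped register (reg_write(), reg_read() and force_idle() are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define LCR_SPAR 0x20                    /* don't-care bit, as with UART_LCR_SPAR */

    static unsigned int fake_lcr;            /* stands in for the mapped LCR */

    static void reg_write(unsigned int val)  { fake_lcr = val; }
    static unsigned int reg_read(void)       { return fake_lcr; }
    static void force_idle(void)             { /* quiesce the IP block */ }

    static bool write_lcr_verified(unsigned int val)
    {
            int tries = 1000;                /* same bound as the driver */

            reg_write(val);
            while (tries--) {
                    if ((reg_read() & ~LCR_SPAR) == (val & ~LCR_SPAR))
                            return true;     /* the write took effect */
                    force_idle();            /* busy UART ignored it; retry */
                    reg_write(val);
            }
            return false;                    /* report it outside any spinlock */
    }

    int main(void)
    {
            printf("LCR accepted: %d\n", write_lcr_verified(0x03));
            return 0;
    }
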
diff --combined fs/debugfs/inode.c
index 21fa3c413eab62aaa69e704d8cebaabbaf87cac9,26d7fff8d78e00e3ffc854caaf298e8536fe2bbc..121d2b6d1fbc7369acbf2eb3a975d037ec38040f
@@@ -28,7 -28,7 +28,7 @@@
  #include <linux/magic.h>
  #include <linux/slab.h>
  
 -#define DEBUGFS_DEFAULT_MODE  0700
 +#define DEBUGFS_DEFAULT_MODE  0755
  
  static struct vfsmount *debugfs_mount;
  static int debugfs_mount_count;
@@@ -545,7 -545,7 +545,7 @@@ void debugfs_remove_recursive(struct de
        parent = dentry;
   down:
        mutex_lock(&parent->d_inode->i_mutex);
-       list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+       list_for_each_entry_safe(child, next, &parent->d_subdirs, d_child) {
                if (!debugfs_positive(child))
                        continue;
  
        mutex_lock(&parent->d_inode->i_mutex);
  
        if (child != dentry) {
-               next = list_entry(child->d_u.d_child.next, struct dentry,
-                                       d_u.d_child);
+               next = list_entry(child->d_child.next, struct dentry,
+                                       d_child);
                goto up;
        }
  
diff --combined include/linux/mm.h
index d67c88967c9dffff964acf33183bb8d6301dc87c,53b0d70120a10ef7a1aec2c55833a4fed622f30b..0cdc92331246ee44562c8b3f3074e2b8958b71df
@@@ -99,7 -99,6 +99,7 @@@ extern unsigned int kobjsize(const voi
  
  #define VM_DONTCOPY   0x00020000      /* Do not copy this vma on fork */
  #define VM_DONTEXPAND 0x00040000      /* Cannot expand with mremap() */
 +#define VM_RESERVED   0x00080000      /* Count as reserved_vm like IO */
  #define VM_ACCOUNT    0x00100000      /* Is a VM accounted object */
  #define VM_NORESERVE  0x00200000      /* should the VM suppress accounting */
  #define VM_HUGETLB    0x00400000      /* Huge TLB Page VM */
@@@ -326,8 -325,6 +326,8 @@@ static inline int is_vmalloc_or_module_
  }
  #endif
  
 +extern void kvfree(const void *addr);
 +
  static inline void compound_lock(struct page *page)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@@ -894,6 -891,7 +894,7 @@@ static inline int page_mapped(struct pa
  #define VM_FAULT_WRITE        0x0008  /* Special case for get_user_pages */
  #define VM_FAULT_HWPOISON 0x0010      /* Hit poisoned small page */
  #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+ #define VM_FAULT_SIGSEGV 0x0040
  
  #define VM_FAULT_NOPAGE       0x0100  /* ->fault installed the pte, not return page */
  #define VM_FAULT_LOCKED       0x0200  /* ->fault locked the returned page */
  
  #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
  
- #define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-                        VM_FAULT_HWPOISON_LARGE)
+ #define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+                        VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)
  
  /* Encode hstate index for a hwpoisoned large page */
  #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@@ -925,7 -923,6 +926,7 @@@ extern void pagefault_out_of_memory(voi
  extern void show_free_areas(unsigned int flags);
  extern bool skip_free_areas_node(unsigned int flags, int nid);
  
 +void shmem_set_file(struct vm_area_struct *vma, struct file *file);
  int shmem_zero_setup(struct vm_area_struct *);
  
  extern int can_do_mlock(void);
@@@ -1504,7 -1501,7 +1505,7 @@@ extern int vma_adjust(struct vm_area_st
  extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
 -      struct mempolicy *);
 +      struct mempolicy *, const char __user *);
  extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
  extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
@@@ -1718,7 -1715,6 +1719,7 @@@ static inline struct page *follow_page(
  #define FOLL_HWPOISON 0x100   /* check page is hwpoisoned */
  #define FOLL_NUMA     0x200   /* force NUMA hinting page fault */
  #define FOLL_MIGRATION        0x400   /* wait for page to replace migration entry */
 +#define FOLL_COW      0x4000  /* internal GUP flag */
  
  typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
@@@ -1766,7 -1762,6 +1767,7 @@@ int drop_caches_sysctl_handler(struct c
  unsigned long shrink_slab(struct shrink_control *shrink,
                          unsigned long nr_pages_scanned,
                          unsigned long lru_pages);
 +void drop_pagecache(void);
  
  #ifndef CONFIG_MMU
  #define randomize_va_space 0
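
Note on the mm.h hunks above: the new VM_FAULT_SIGSEGV code lets the fault path request a SIGSEGV kill separately from SIGBUS (the stack guard page check in the mm/memory.c section below now returns it), and VM_FAULT_ERROR is widened so existing error tests still catch it. A compilable sketch of how a caller translates these codes into errnos, following the fixup_user_fault() hunk further down; the OOM and SIGBUS values are the standard ones from this kernel's mm.h and are not shown in the hunk:

    #include <errno.h>

    #define VM_FAULT_OOM            0x0001
    #define VM_FAULT_SIGBUS         0x0002
    #define VM_FAULT_HWPOISON       0x0010
    #define VM_FAULT_HWPOISON_LARGE 0x0020
    #define VM_FAULT_SIGSEGV        0x0040   /* added by this release */
    #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
                            VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)

    /* Translate a fault code the way the fixup_user_fault() hunk does. */
    static int fault_to_errno(unsigned int ret)
    {
            if (!(ret & VM_FAULT_ERROR))
                    return 0;
            if (ret & VM_FAULT_OOM)
                    return -ENOMEM;
            if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                    return -EHWPOISON;       /* Linux-specific errno */
            return -EFAULT;                  /* SIGBUS and SIGSEGV both land here */
    }

    int main(void) { return fault_to_errno(VM_FAULT_SIGSEGV) == -EFAULT ? 0 : 1; }
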
diff --combined kernel/cgroup.c
index cd1c303214f32c20672538283d1e053323abb65c,ef130605ac43efdf22d61e8072476ac88556c1a2..9bb9aee16c6429f117a8cf41d2d6ae4e97a73a7f
@@@ -984,7 -984,7 +984,7 @@@ static void cgroup_d_remove_dir(struct 
        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-       list_del_init(&dentry->d_u.d_child);
+       list_del_init(&dentry->d_child);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
        remove_dir(dentry);
@@@ -2106,24 -2106,6 +2106,24 @@@ out_free_group_list
        return retval;
  }
  
 +static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 +{
 +      struct cgroup_subsys *ss;
 +      int ret;
 +
 +      for_each_subsys(cgrp->root, ss) {
 +              if (ss->allow_attach) {
 +                      ret = ss->allow_attach(cgrp, tset);
 +                      if (ret)
 +                              return ret;
 +              } else {
 +                      return -EACCES;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  /*
   * Find the task_struct of the task to attach by vpid and pass it along to the
   * function to attach either it or all tasks in its threadgroup. Will lock
@@@ -2155,18 -2137,9 +2155,18 @@@ retry_find_task
                if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                    !uid_eq(cred->euid, tcred->uid) &&
                    !uid_eq(cred->euid, tcred->suid)) {
 -                      rcu_read_unlock();
 -                      ret = -EACCES;
 -                      goto out_unlock_cgroup;
 +                      /*
 +                       * if the default permission check fails, give each
 +                       * cgroup a chance to extend the permission check
 +                       */
 +                      struct cgroup_taskset tset = { };
 +                      tset.single.task = tsk;
 +                      tset.single.cgrp = cgrp;
 +                      ret = cgroup_allow_attach(cgrp, &tset);
 +                      if (ret) {
 +                              rcu_read_unlock();
 +                              goto out_unlock_cgroup;
 +                      }
                }
        } else
                tsk = current;
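
Note on the cgroup.c hunks above: when the default euid-based check for moving a task fails, the new cgroup_allow_attach() polls each subsystem's allow_attach() hook and falls back to -EACCES if any subsystem lacks one. A kernel-side sketch of what such a hook could look like; example_allow_attach() and its CAP_SYS_NICE policy are illustrative only, not part of this patch (Android's common helper uses a similar capability check):

    static int example_allow_attach(struct cgroup *cgrp,
                                    struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            cgroup_taskset_for_each(task, NULL, tset) {
                    /* Illustrative policy: a writer with CAP_SYS_NICE may move
                     * other tasks; everyone else keeps the -EACCES default. */
                    if (current != task && !capable(CAP_SYS_NICE))
                            return -EACCES;
            }
            return 0;
    }

A subsystem would opt in by setting .allow_attach = example_allow_attach in its struct cgroup_subsys.
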
diff --combined kernel/trace/trace.c
index 7920b754458a57e9d7855d1c258a970da2eaa48f,640e4c44b1701ed229faf1d0dedd43ca43b7150f..43267f734606a690a6ccedbb17c14be9f3f465cb
  #include <linux/poll.h>
  #include <linux/nmi.h>
  #include <linux/fs.h>
 +
  #include <linux/sched/rt.h>
  
  #include "trace.h"
  #include "trace_output.h"
  
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +#include <linux/mtk_ftrace.h>
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/mtk_events.h>
 +EXPORT_TRACEPOINT_SYMBOL(gpu_freq);
 +#endif
 +
 +#ifdef CONFIG_MTK_EXTMEM
 +#include <linux/vmalloc.h>
 +#endif
 +
  /*
   * On boot up, the ring buffer is set to the minimum size, so that
   * we do not waste memory on systems that are not using tracing.
@@@ -297,15 -285,6 +297,15 @@@ int tracing_is_enabled(void
  
  static unsigned long          trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
  
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +#define CPUX_TRACE_BUF_SIZE_DEFAULT 4194304UL
 +#define CPU0_to_CPUX_RATIO (1.2)
 +extern unsigned int get_max_DRAM_size (void);
 +static unsigned long        trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO);
 +static unsigned long        trace_buf_size_cpuX = CPUX_TRACE_BUF_SIZE_DEFAULT;
 +static unsigned int         trace_buf_size_updated_from_cmdline = 0;
 +#endif
 +
  /* trace_types holds a link list of available tracers. */
  static struct tracer          *trace_types __read_mostly;
  
@@@ -397,17 -376,10 +397,17 @@@ static inline void trace_access_lock_in
  #endif
  
  /* trace_flags holds trace_options default values */
 +#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
 +unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 +      TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 +      TRACE_ITER_GRAPH_TIME | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |
 +    TRACE_ITER_FUNCTION;
 +#else
  unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 +#endif
  
  void tracer_tracing_on(struct trace_array *tr)
  {
  void tracing_on(void)
  {
        tracer_tracing_on(&global_trace);
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +    trace_tracing_on(1, CALLER_ADDR0);
 +#endif
  }
  EXPORT_SYMBOL_GPL(tracing_on);
  
@@@ -672,9 -641,6 +672,9 @@@ void tracer_tracing_off(struct trace_ar
   */
  void tracing_off(void)
  {
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +    trace_tracing_on(0, CALLER_ADDR0);
 +#endif
        tracer_tracing_off(&global_trace);
  }
  EXPORT_SYMBOL_GPL(tracing_off);
@@@ -712,11 -678,6 +712,11 @@@ static int __init set_buf_size(char *st
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +    trace_buf_size_cpu0 = 
 +        trace_buf_size_cpuX = buf_size ;
 +    trace_buf_size_updated_from_cmdline = 1;
 +#endif
        return 1;
  }
  __setup("trace_buf_size=", set_buf_size);
@@@ -769,7 -730,6 +769,7 @@@ static const char *trace_options[] = 
        "irq-info",
        "markers",
        "function-trace",
 +      "print-tgid",
        NULL
  };
  
@@@ -1240,7 -1200,6 +1240,7 @@@ void tracing_reset(struct trace_buffer 
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);
  
 +    printk(KERN_INFO "[ftrace]cpu %d trace reset\n", cpu);
        ring_buffer_record_enable(buffer);
  }
  
@@@ -1262,7 -1221,6 +1262,7 @@@ void tracing_reset_online_cpus(struct t
        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);
  
 +    printk(KERN_INFO "[ftrace]all cpu trace reset\n");
        ring_buffer_record_enable(buffer);
  }
  
@@@ -1281,18 -1239,9 +1281,18 @@@ void tracing_reset_all_online_cpus(void
  
  #define SAVED_CMDLINES 128
  #define NO_CMDLINE_MAP UINT_MAX
 +#ifdef CONFIG_MTK_EXTMEM
 +extern void* extmem_malloc_page_align(size_t bytes);
 +#define SIZEOF_MAP_PID_TO_CMDLINE ((PID_MAX_DEFAULT+1)*sizeof(unsigned))
 +#define SIZEOF_MAP_CMDLINE_TO_PID (SAVED_CMDLINES*sizeof(unsigned))
 +static unsigned* map_pid_to_cmdline = NULL;
 +static unsigned* map_cmdline_to_pid = NULL;
 +#else
  static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
  static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 +#endif
  static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 +static unsigned saved_tgids[SAVED_CMDLINES];
  static int cmdline_idx;
  static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
  
@@@ -1301,23 -1250,8 +1301,23 @@@ static atomic_t trace_record_cmdline_di
  
  static void trace_init_cmdlines(void)
  {
 +#ifdef CONFIG_MTK_EXTMEM
 +      map_pid_to_cmdline = (unsigned *) extmem_malloc_page_align(SIZEOF_MAP_PID_TO_CMDLINE);
 +      if(map_pid_to_cmdline == NULL) {
 +              pr_err("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
 +              map_pid_to_cmdline = (unsigned *)vmalloc(SIZEOF_MAP_PID_TO_CMDLINE);
 +      }
 +      map_cmdline_to_pid = (unsigned *) extmem_malloc_page_align(SIZEOF_MAP_CMDLINE_TO_PID);
 +      if(map_pid_to_cmdline == NULL) {
 +              pr_warning("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
 +              map_cmdline_to_pid = (unsigned *)vmalloc(SIZEOF_MAP_CMDLINE_TO_PID);
 +      }
 +      memset(map_pid_to_cmdline, NO_CMDLINE_MAP, SIZEOF_MAP_PID_TO_CMDLINE);
 +      memset(map_cmdline_to_pid, NO_CMDLINE_MAP, SIZEOF_MAP_CMDLINE_TO_PID);  
 +#else
        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 +#endif
        cmdline_idx = 0;
  }
  
@@@ -1351,7 -1285,6 +1351,7 @@@ void tracing_start(void
  {
        struct ring_buffer *buffer;
        unsigned long flags;
 +    int reset_ftrace = 0;
  
        if (tracing_disabled)
                return;
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        global_trace.stop_count = 0;
 +            reset_ftrace = 1;
                }
                goto out;
 -      }
 +      }else
 +        reset_ftrace = 1;
 +
  
        /* Prevent the buffers from switching */
        arch_spin_lock(&ftrace_max_lock);
  
   out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 +
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +    // reset ring buffer when all readers left
 +    if(reset_ftrace == 1 && global_trace.stop_count == 0)
 +        tracing_reset_online_cpus(&global_trace.trace_buffer);
 +#endif
  }
  
  static void tracing_start_tr(struct trace_array *tr)
@@@ -1519,7 -1443,6 +1519,7 @@@ static int trace_save_cmdline(struct ta
        }
  
        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 +      saved_tgids[idx] = tsk->tgid;
  
        arch_spin_unlock(&trace_cmdline_lock);
  
@@@ -1557,25 -1480,6 +1557,25 @@@ void trace_find_cmdline(int pid, char c
        preempt_enable();
  }
  
 +int trace_find_tgid(int pid)
 +{
 +      unsigned map;
 +      int tgid;
 +
 +      preempt_disable();
 +      arch_spin_lock(&trace_cmdline_lock);
 +      map = map_pid_to_cmdline[pid];
 +      if (map != NO_CMDLINE_MAP)
 +              tgid = saved_tgids[map];
 +      else
 +              tgid = -1;
 +
 +      arch_spin_unlock(&trace_cmdline_lock);
 +      preempt_enable();
 +
 +      return tgid;
 +}
 +
  void tracing_record_cmdline(struct task_struct *tsk)
  {
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@@ -2032,8 -1936,7 +2032,8 @@@ void trace_printk_init_buffers(void
        pr_info("ftrace: Allocated trace_printk buffers\n");
  
        /* Expand the buffers to set size */
 -      tracing_update_buffers();
 +    /* M: avoid to expand buffer because of trace_printk in kernel */
 +      /* tracing_update_buffers(); */
  
        buffers_allocated = 1;
  
@@@ -2522,9 -2425,6 +2522,9 @@@ static void print_event_info(struct tra
        get_total_entries(buf, &total, &entries);
        seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
                   entries, total, num_online_cpus());
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +    print_enabled_events(m);
 +#endif
        seq_puts(m, "#\n");
  }
  
@@@ -2535,13 -2435,6 +2535,13 @@@ static void print_func_help_header(stru
        seq_puts(m, "#              | |       |          |         |\n");
  }
  
 +static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
 +{
 +      print_event_info(buf, m);
 +      seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
 +      seq_puts(m, "#              | |        |      |          |         |\n");
 +}
 +
  static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
  {
        print_event_info(buf, m);
        seq_puts(m, "#              | |       |   ||||       |         |\n");
  }
  
 +static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
 +{
 +      print_event_info(buf, m);
 +      seq_puts(m, "#                                      _-----=> irqs-off\n");
 +      seq_puts(m, "#                                     / _----=> need-resched\n");
 +      seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
 +      seq_puts(m, "#                                    || / _--=> preempt-depth\n");
 +      seq_puts(m, "#                                    ||| /     delay\n");
 +      seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
 +      seq_puts(m, "#              | |        |      |   ||||       |         |\n");
 +}
 +
  void
  print_trace_header(struct seq_file *m, struct trace_iterator *iter)
  {
@@@ -2866,15 -2747,9 +2866,15 @@@ void trace_default_header(struct seq_fi
        } else {
                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
                        if (trace_flags & TRACE_ITER_IRQ_INFO)
 -                              print_func_help_header_irq(iter->trace_buffer, m);
 +                              if (trace_flags & TRACE_ITER_TGID)
 +                                      print_func_help_header_irq_tgid(iter->trace_buffer, m);
 +                              else
 +                                      print_func_help_header_irq(iter->trace_buffer, m);
                        else
 -                              print_func_help_header(iter->trace_buffer, m);
 +                              if (trace_flags & TRACE_ITER_TGID)
 +                                      print_func_help_header_tgid(iter->trace_buffer, m);
 +                              else
 +                                      print_func_help_header(iter->trace_buffer, m);
                }
        }
  }
@@@ -3140,7 -3015,6 +3140,7 @@@ static int tracing_release(struct inod
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);
  
 +    printk(KERN_INFO "[ftrace]end reading trace file\n");
        if (!iter->snapshot)
                /* reenable tracing if it was previously enabled */
                tracing_start_tr(tr);
@@@ -3195,7 -3069,6 +3195,7 @@@ static int tracing_open(struct inode *i
        }
  
        if (file->f_mode & FMODE_READ) {
 +        printk(KERN_INFO "[ftrace]start reading trace file\n");
                iter = __tracing_open(inode, file, false);
                if (IS_ERR(iter))
                        ret = PTR_ERR(iter);
@@@ -3728,53 -3601,9 +3728,53 @@@ tracing_saved_cmdlines_read(struct fil
  }
  
  static const struct file_operations tracing_saved_cmdlines_fops = {
 -    .open       = tracing_open_generic,
 -    .read       = tracing_saved_cmdlines_read,
 -    .llseek   = generic_file_llseek,
 +      .open   = tracing_open_generic,
 +      .read   = tracing_saved_cmdlines_read,
 +      .llseek = generic_file_llseek,
 +};
 +
 +static ssize_t
 +tracing_saved_tgids_read(struct file *file, char __user *ubuf,
 +                              size_t cnt, loff_t *ppos)
 +{
 +      char *file_buf;
 +      char *buf;
 +      int len = 0;
 +      int pid;
 +      int i;
 +
 +      file_buf = kmalloc(SAVED_CMDLINES*(16+1+16), GFP_KERNEL);
 +      if (!file_buf)
 +              return -ENOMEM;
 +
 +      buf = file_buf;
 +
 +      for (i = 0; i < SAVED_CMDLINES; i++) {
 +              int tgid;
 +              int r;
 +
 +              pid = map_cmdline_to_pid[i];
 +              if (pid == -1 || pid == NO_CMDLINE_MAP)
 +                      continue;
 +
 +              tgid = trace_find_tgid(pid);
 +              r = sprintf(buf, "%d %d\n", pid, tgid);
 +              buf += r;
 +              len += r;
 +      }
 +
 +      len = simple_read_from_buffer(ubuf, cnt, ppos,
 +                                    file_buf, len);
 +
 +      kfree(file_buf);
 +
 +      return len;
 +}
 +
 +static const struct file_operations tracing_saved_tgids_fops = {
 +      .open   = tracing_open_generic,
 +      .read   = tracing_saved_tgids_read,
 +      .llseek = generic_file_llseek,
  };
  
  static ssize_t
@@@ -3900,7 -3729,7 +3900,7 @@@ static int __tracing_resize_ring_buffer
        return ret;
  }
  
 -static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
 +ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
                                          unsigned long size, int cpu_id)
  {
        int ret = size;
@@@ -3925,6 -3754,7 +3925,6 @@@ out
        return ret;
  }
  
 -
  /**
   * tracing_update_buffers - used by tracing facility to expand ring buffers
   *
  int tracing_update_buffers(void)
  {
        int ret = 0;
 +#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE 
 +    int i = 0;
 +#endif
  
        mutex_lock(&trace_types_lock);
        if (!ring_buffer_expanded)
 +#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
 +    {
 +        if(get_max_DRAM_size() >= 0x40000000 && !trace_buf_size_updated_from_cmdline){
 +            trace_buf_size_cpu0 = (CPUX_TRACE_BUF_SIZE_DEFAULT * CPU0_to_CPUX_RATIO * 1.25);
 +            trace_buf_size_cpuX = (CPUX_TRACE_BUF_SIZE_DEFAULT * 1.25);
 +        }
 +
 +        for_each_tracing_cpu(i){
 +            ret = __tracing_resize_ring_buffer(&global_trace, (i==0?trace_buf_size_cpu0:trace_buf_size_cpuX), i); 
 +            if(ret < 0){
 +                printk("KERN_INFO [ftrace]fail to update cpu%d ring buffer to %lu KB \n",
 +                        i, (i==0?(trace_buf_size_cpu0>>10):(trace_buf_size_cpuX>>10)));
 +                break;
 +            }
 +        }
 +    }
 +#else
                ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
                                                RING_BUFFER_ALL_CPUS);
 +#endif
        mutex_unlock(&trace_types_lock);
  
        return ret;
@@@ -4083,7 -3892,6 +4083,7 @@@ tracing_set_trace_write(struct file *fi
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;
  
 +    printk(KERN_INFO "[ftrace]set current_tracer to '%s'\n", buf);
        err = tracing_set_tracer(buf);
        if (err)
                return err;
@@@ -4608,7 -4416,6 +4608,7 @@@ tracing_entries_write(struct file *filp
        struct inode *inode = file_inode(filp);
        struct trace_array *tr = inode->i_private;
        unsigned long val;
 +    int do_drop_cache = 0;
        int ret;
  
        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
  
        /* value is in KB */
        val <<= 10;
 +resize_ring_buffer:
        ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 -      if (ret < 0)
 +    if (ret == -ENOMEM && !do_drop_cache) {
 +        do_drop_cache++;
 +        drop_pagecache();
 +        goto resize_ring_buffer;
 +    } else if (ret < 0)
                return ret;
  
        *ppos += cnt;
@@@ -6051,20 -5853,12 +6051,20 @@@ rb_simple_write(struct file *filp, cons
                return ret;
  
        if (buffer) {
 +        if(ring_buffer_record_is_on(buffer) ^ val)
 +            printk(KERN_INFO "[ftrace]tracing_on is toggled to %lu\n", val);
                mutex_lock(&trace_types_lock);
                if (val) {
                        tracer_tracing_on(tr);
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +            trace_tracing_on(val, CALLER_ADDR0);
 +#endif
                        if (tr->current_trace->start)
                                tr->current_trace->start(tr);
                } else {
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +            trace_tracing_on(val, CALLER_ADDR0);
 +#endif
                        tracer_tracing_off(tr);
                        if (tr->current_trace->stop)
                                tr->current_trace->stop(tr);
@@@ -6085,43 -5879,6 +6085,43 @@@ static const struct file_operations rb_
        .llseek         = default_llseek,
  };
  
 +#ifdef CONFIG_MTK_KERNEL_MARKER
 +static int mt_kernel_marker_enabled = 1;
 +static ssize_t
 +mt_kernel_marker_enabled_simple_read(struct file *filp, char __user *ubuf,
 +             size_t cnt, loff_t *ppos)
 +{
 +      char buf[64];
 +      int r;
 +
 +      r = sprintf(buf, "%d\n", mt_kernel_marker_enabled);
 +
 +      return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 +}
 +static ssize_t
 +mt_kernel_marker_enabled_simple_write(struct file *filp, const char __user *ubuf,
 +              size_t cnt, loff_t *ppos)
 +{
 +      unsigned long val;
 +      int ret;
 +
 +      ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 +      if (ret)
 +              return ret;
 +
 +    mt_kernel_marker_enabled = !!val;
 +
 +      (*ppos)++;
 +
 +      return cnt;
 +}
 +static const struct file_operations kernel_marker_simple_fops = {
 +      .open           = tracing_open_generic,
 +      .read           = mt_kernel_marker_enabled_simple_read,
 +      .write          = mt_kernel_marker_enabled_simple_write,
 +      .llseek         = default_llseek,
 +};
 +#endif
  struct dentry *trace_instance_dir;
  
  static void
@@@ -6306,7 -6063,7 +6306,7 @@@ static int instance_mkdir (struct inod
        int ret;
  
        /* Paranoid: Make sure the parent is the "instances" directory */
-       parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+       parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
        if (WARN_ON_ONCE(parent != trace_instance_dir))
                return -ENOENT;
  
@@@ -6333,7 -6090,7 +6333,7 @@@ static int instance_rmdir(struct inode 
        int ret;
  
        /* Paranoid: Make sure the parent is the "instances" directory */
-       parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+       parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
        if (WARN_ON_ONCE(parent != trace_instance_dir))
                return -ENOENT;
  
@@@ -6400,9 -6157,6 +6400,9 @@@ init_tracer_debugfs(struct trace_array 
        trace_create_file("trace_marker", 0220, d_tracer,
                          tr, &tracing_mark_fops);
  
 +      trace_create_file("saved_tgids", 0444, d_tracer,
 +                        tr, &tracing_saved_tgids_fops);
 +
        trace_create_file("trace_clock", 0644, d_tracer, tr,
                          &trace_clock_fops);
  
@@@ -6661,7 -6415,7 +6661,7 @@@ __init static int tracer_alloc_buffers(
        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
 -              trace_printk_init_buffers();
 +        trace_printk_init_buffers();
  
        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
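
Note on the trace.c hunks above: the new saved_tgids file exposes the PID-to-TGID map behind the TGID-aware headers enabled by the new print-tgid option. A small userspace reader; it assumes debugfs is mounted at the usual /sys/kernel/debug, and each line is "<pid> <tgid>" per the sprintf() in tracing_saved_tgids_read():

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/tracing/saved_tgids", "r");
            int pid, tgid;

            if (!f) {
                    perror("saved_tgids");
                    return 1;
            }
            while (fscanf(f, "%d %d", &pid, &tgid) == 2)
                    printf("pid %d -> tgid %d\n", pid, tgid);
            fclose(f);
            return 0;
    }
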
diff --combined kernel/trace/trace_events.c
index afb00ccb76d47d4da347ac6751849af58a5d8f67,5a898f15bfc6ea4aff291fdad3de8ad2f654224b..8f3bcca56e0fe4fa00b3a62e3aa8c103feb22b96
@@@ -22,8 -22,6 +22,8 @@@
  
  #include "trace_output.h"
  
 +#include <linux/mtk_ftrace.h>
 +
  #undef TRACE_SYSTEM
  #define TRACE_SYSTEM "TRACE_SYSTEM"
  
@@@ -256,9 -254,6 +256,9 @@@ static int __ftrace_event_enable_disabl
        int ret = 0;
        int disable;
  
 +    if(call->name && ((file->flags & FTRACE_EVENT_FL_ENABLED) ^ enable))
 +        printk(KERN_INFO "[ftrace]event '%s' is %s\n", call->name, enable?"enabled":"disabled");
 +
        switch (enable) {
        case 0:
                /*
@@@ -430,7 -425,7 +430,7 @@@ static void remove_event_file_dir(struc
  
        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
-               list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+               list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (child->d_inode)     /* probably unneeded */
                                child->d_inode->i_private = NULL;
                }
@@@ -2398,31 -2393,6 +2398,31 @@@ static __init int setup_trace_event(cha
  }
  __setup("trace_event=", setup_trace_event);
  
 +#ifdef CONFIG_MTK_SCHED_TRACERS
 +// collect boot time ftrace, disabled by default
 +static int boot_time_ftrace = 0;
 +
 +static __init int setup_boot_time_ftrace(char *str)
 +{
 +    boot_time_ftrace = 1;
 +    return 1;
 +}
 +__setup("boot_time_ftrace", setup_boot_time_ftrace);
 +
 +#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
 +
 +// delay the ring buffer expand until late_initcall stage
 +// to avoid impacting the boot time
 +static __init int expand_ring_buffer_init(void){
 +    if(!boot_time_ftrace)
 +        tracing_update_buffers();
 +    return 0;
 +}
 +late_initcall(expand_ring_buffer_init);
 +
 +#endif /* CONFIG_MTK_FTRACE_DEFAULT_ENABLE */
 +#endif /* CONFIG_MTK_SCHED_TRACERS */
 +
  /* Expects to have event_mutex held when called */
  static int
  create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
@@@ -2612,19 -2582,6 +2612,19 @@@ static __init int event_trace_init(void
        if (ret)
                return ret;
  
 +#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
 +    // enable ftrace facilities
 +    mt_ftrace_enable_disable(1);
 +
 +    // only update the buffer earlier if we want to collect boot-time ftrace,
 +    // to avoid the boot time being impacted by an early-expanded ring buffer
 +    if(boot_time_ftrace)
 +        tracing_update_buffers();
 +    else 
 +        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 +    printk(KERN_INFO "[ftrace]ftrace ready...\n");
 +#endif
 +
        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");
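
Note on the trace_events.c hunks above: with CONFIG_MTK_FTRACE_DEFAULT_ENABLE, the ring-buffer expansion is deferred to late_initcall time unless boot_time_ftrace is on the command line, trading early trace coverage for boot speed. A generic sketch of that defer-unless-requested pattern; expensive_setup() is a hypothetical stand-in for tracing_update_buffers():

    /* Sketch of the defer-unless-requested boot pattern used above. */
    static bool want_boot_trace;             /* set during command-line parsing */

    static int __init parse_boot_trace(char *str)
    {
            want_boot_trace = true;
            return 1;
    }
    __setup("boot_time_ftrace", parse_boot_trace);

    static int __init deferred_expand(void)
    {
            if (!want_boot_trace)
                    expensive_setup();       /* big allocation waits until late boot */
            return 0;
    }
    late_initcall(deferred_expand);
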
diff --combined mm/ksm.c
index e0b9a325b351e0b4c7dd48a167358c25ab8d0286,7bf748f30aab4f05c9828b6a98ee3400b96515e0..5c0d01827542dce79662d05f3edbbdc0bde287e3
+++ b/mm/ksm.c
@@@ -376,7 -376,7 +376,7 @@@ static int break_ksm(struct vm_area_str
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
-       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
@@@ -1714,9 -1714,7 +1714,9 @@@ static int ksmd_should_run(void
  static int ksm_scan_thread(void *nothing)
  {
        set_freezable();
 -      set_user_nice(current, 5);
 +      // M: set KSMD's priority to the lowest value
 +      set_user_nice(current, 19);
 +      //set_user_nice(current, 5);
  
        while (!kthread_should_stop()) {
                mutex_lock(&ksm_thread_mutex);
diff --combined mm/memory.c
index bb12c446be8345a5febdd3c6174cd8d763e0f84b,e6b1da3a8924c54ee547cac496c78e2e6f93d0e4..9feed4bfb32366d3356ba6dcc6390c748655ca75
  
  #include "internal.h"
  
 +#ifdef CONFIG_MTK_EXTMEM
 +extern bool extmem_in_mspace(struct vm_area_struct *vma);
 +extern unsigned long get_virt_from_mspace(unsigned long pa);
 +#endif
 +
  #ifdef LAST_NID_NOT_IN_PAGE_FLAGS
  #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nid.
  #endif
@@@ -1467,16 -1462,6 +1467,16 @@@ int zap_vma_ptes(struct vm_area_struct 
  }
  EXPORT_SYMBOL_GPL(zap_vma_ptes);
  
 +/*
 + * FOLL_FORCE can write to even unwritable pte's, but only
 + * after we've gone through a COW cycle and they are dirty.
 + */
 +static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 +{
 +      return pte_write(pte) ||
 +              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 +}
 +
  /**
   * follow_page_mask - look up a page descriptor from a user-virtual address
   * @vma: vm_area_struct mapping @address
@@@ -1584,7 -1569,7 +1584,7 @@@ split_fallthrough
        }
        if ((flags & FOLL_NUMA) && pte_numa(pte))
                goto no_page;
 -      if ((flags & FOLL_WRITE) && !pte_write(pte))
 +      if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
                goto unlock;
  
        page = vm_normal_page(vma, address, pte);
@@@ -1658,7 -1643,6 +1658,7 @@@ no_page_table
                return ERR_PTR(-EFAULT);
        return page;
  }
 +EXPORT_SYMBOL_GPL(follow_page_mask);
  
  static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
  {
@@@ -1803,24 -1787,11 +1803,24 @@@ long __get_user_pages(struct task_struc
                        page_mask = 0;
                        goto next_page;
                }
 -
 +    #ifdef CONFIG_MTK_EXTMEM
 +        if (!vma || !(vm_flags & vma->vm_flags))
 +              {
 +                  return i ? : -EFAULT;
 +        }
 +
 +              if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 +              {
 +                  /*Would pass VM_IO | VM_RESERVED | VM_PFNMAP. (for Reserved Physical Memory PFN Mapping Usage)*/
 +                  if(!((vma->vm_flags&VM_IO)&&(vma->vm_flags&VM_RESERVED)&&(vma->vm_flags&VM_PFNMAP)))
 +                          return i ? : -EFAULT;
 +        }
 +    #else
                if (!vma ||
                    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 +    #endif
  
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                else
                                                        return -EFAULT;
                                        }
-                                       if (ret & VM_FAULT_SIGBUS)
+                                       if (ret & (VM_FAULT_SIGBUS |
+                                                  VM_FAULT_SIGSEGV))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
 -                                      foll_flags &= ~FOLL_WRITE;
 +                                      foll_flags |= FOLL_COW;
  
                                cond_resched();
                        }
@@@ -1983,7 -1955,7 +1984,7 @@@ int fixup_user_fault(struct task_struc
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
@@@ -2396,18 -2368,12 +2397,18 @@@ int remap_pfn_range(struct vm_area_stru
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         * See vm_normal_page() for details.
         */
 +#ifdef CONFIG_MTK_EXTMEM
 +      if (addr == vma->vm_start && end == vma->vm_end) {
 +              vma->vm_pgoff = pfn;
 +      } else if (is_cow_mapping(vma->vm_flags))
 +              return -EINVAL;
 +#else
        if (is_cow_mapping(vma->vm_flags)) {
                if (addr != vma->vm_start || end != vma->vm_end)
                        return -EINVAL;
                vma->vm_pgoff = pfn;
        }
 -
 +#endif
        err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
        if (err)
                return -EINVAL;
@@@ -3264,13 -3230,9 +3265,13 @@@ static int do_anonymous_page(struct mm_
  
        pte_unmap(page_table);
  
 +      /* File mapping without ->vm_ops ? */
 +      if (vma->vm_flags & VM_SHARED)
 +              return VM_FAULT_SIGBUS;
 +
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_SIGSEGV;
  
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
@@@ -3532,10 -3494,6 +3533,10 @@@ static int do_linear_fault(struct mm_st
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  
 +      /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 +      if (!vma->vm_ops->fault)
 +              return VM_FAULT_SIGBUS;
 +
        pte_unmap(page_table);
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
  }
@@@ -3749,8 -3707,9 +3750,8 @@@ int handle_pte_fault(struct mm_struct *
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
 -                              if (likely(vma->vm_ops->fault))
 -                                      return do_linear_fault(mm, vma, address,
 -                                              pte, pmd, flags, entry);
 +                              return do_linear_fault(mm, vma, address,
 +                                      pte, pmd, flags, entry);
                        }
                        return do_anonymous_page(mm, vma, address,
                                                 pte, pmd, flags);
@@@ -4162,21 -4121,6 +4163,21 @@@ static int __access_remote_vm(struct ta
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0) {
 +#ifdef CONFIG_MTK_EXTMEM
 +                      if (!write) {
 +                              vma = find_vma(mm, addr);
 +                              if (!vma || vma->vm_start > addr)
 +                                      break;
 +                              if (vma->vm_end < addr + len)
 +                                      len = vma->vm_end - addr;
 +                              if (extmem_in_mspace(vma)) {
 +                                      void *extmem_va = (void *)get_virt_from_mspace(vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start);
 +                                      memcpy(buf, extmem_va, len);
 +                                      buf += len;
 +                                      break;
 +                              }
 +                      }
 +#endif
                        /*
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.
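
Note on the mm/memory.c hunks above: the can_follow_write_pte()/FOLL_COW change has the same shape as the upstream "Dirty COW" (CVE-2016-5195) fix. Instead of clearing FOLL_WRITE after a COW fault, which raced with reclaim of the COW'd page, the __get_user_pages() loop keeps requesting write access and tags the retry with FOLL_COW, honoured only once the PTE is dirty from the COW cycle. A compilable restatement of the predicate; struct pte_flags stands in for the real pte_t helpers, FOLL_COW's value is from the mm.h hunk, and FOLL_FORCE's is the kernel's usual 0x10:

    #include <stdbool.h>
    #include <stdio.h>

    #define FOLL_FORCE 0x10
    #define FOLL_COW   0x4000                /* value from the mm.h hunk */

    struct pte_flags { bool write; bool dirty; };

    /* Same decision as can_follow_write_pte(): a forced write through an
     * unwritable PTE is honoured only after a COW cycle left it dirty. */
    static bool can_follow_write(struct pte_flags pte, unsigned int flags)
    {
            return pte.write ||
                   ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
    }

    int main(void)
    {
            struct pte_flags ro_clean = { false, false };
            struct pte_flags ro_dirty = { false, true };

            printf("%d\n", can_follow_write(ro_clean, FOLL_FORCE));            /* 0 */
            printf("%d\n", can_follow_write(ro_dirty, FOLL_FORCE | FOLL_COW)); /* 1 */
            return 0;
    }
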
diff --combined net/ipv4/tcp_input.c
index f9dff6427a0be7af2c8be24d90bc7cfc311693e8,a8be45e4d34fcb0b4e577dd59a58f6269961752c..ab151653c0502a8d20fb071b7028095d88d5717b
@@@ -68,7 -68,6 +68,7 @@@
  #include <linux/module.h>
  #include <linux/sysctl.h>
  #include <linux/kernel.h>
 +#include <linux/reciprocal_div.h>
  #include <net/dst.h>
  #include <net/tcp.h>
  #include <net/inet_common.h>
@@@ -88,7 -87,7 +88,7 @@@ int sysctl_tcp_adv_win_scale __read_mos
  EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
  
  /* rfc5961 challenge ack rate limiting */
 -int sysctl_tcp_challenge_ack_limit = 100;
 +int sysctl_tcp_challenge_ack_limit = 1000;
  
  int sysctl_tcp_stdurg __read_mostly;
  int sysctl_tcp_rfc1337 __read_mostly;
@@@ -99,7 -98,6 +99,7 @@@ int sysctl_tcp_thin_dupack __read_mostl
  
  int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
  int sysctl_tcp_early_retrans __read_mostly = 3;
 +int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
  
  #define FLAG_DATA             0x01 /* Incoming frame contained data.          */
  #define FLAG_WIN_UPDATE               0x02 /* Incoming ACK was a window update.       */
@@@ -353,14 -351,14 +353,14 @@@ static void tcp_grow_window(struct soc
  static void tcp_fixup_rcvbuf(struct sock *sk)
  {
        u32 mss = tcp_sk(sk)->advmss;
 -      u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
 +      u32 icwnd = sysctl_tcp_default_init_rwnd;
        int rcvmem;
  
        /* Limit to 10 segments if mss <= 1460,
         * or 14600/mss segments, with a minimum of two segments.
         */
        if (mss > 1460)
 -              icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 +              icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
  
        rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
        while (tcp_win_from_space(rcvmem) < mss)
@@@ -1870,7 -1868,6 +1870,7 @@@ void tcp_clear_retrans(struct tcp_sock 
  void tcp_enter_loss(struct sock *sk, int how)
  {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 +      struct inet_connection_sock *icsk1 = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        bool new_recovery = false;
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
        }
 +      if (icsk->icsk_MMSRB == 1)
 +      {
 +              #ifdef CONFIG_MTK_NET_LOGGING 
 +          printk("[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n", tp->snd_cwnd, tp->snd_cwnd_cnt);
 +        #endif
 +            if (tp->mss_cache != 0)
 +                tp->snd_cwnd = (tp->rcv_wnd / tp->mss_cache);
 +            else
 +            {
 +                tp->snd_cwnd = (tp->rcv_wnd / tp->advmss);
 +            }
 +              
 +            if (tp->snd_ssthresh > 16)
 +            {
 +                tp->snd_cwnd = tp->snd_ssthresh / 2;//set snd_cwnd is half of default snd_ssthresh
 +            }
 +            else
 +            {
 +                tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
 +            }
 +            #ifdef CONFIG_MTK_NET_LOGGING 
 +            printk("[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
 +            #endif
 +            icsk1->icsk_MMSRB = 0;
 +            #ifdef CONFIG_MTK_NET_LOGGING 
 +            printk("[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
 +            #endif
 +      }
 +        else
 +        {
        tp->snd_cwnd       = 1;
 +        }     
 +  
 +      //tp->snd_cwnd     = 1;
        tp->snd_cwnd_cnt   = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
  
@@@ -1980,7 -1944,7 +1980,7 @@@ static bool tcp_check_sack_reneging(str
                icsk->icsk_retransmits++;
                tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                        icsk->icsk_rto, TCP_RTO_MAX);
 +                                        icsk->icsk_rto, sysctl_tcp_rto_max);
                return true;
        }
        return false;
@@@ -2029,7 -1993,7 +2029,7 @@@ static bool tcp_pause_early_retransmit(
                return false;
  
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
        return true;
  }
  
@@@ -3017,7 -2981,7 +3017,7 @@@ void tcp_rearm_rto(struct sock *sk
                                rto = delta;
                }
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
        }
  }
  
@@@ -3112,10 -3076,11 +3112,11 @@@ static int tcp_clean_rtx_queue(struct s
                        if (seq_rtt < 0) {
                                seq_rtt = ca_seq_rtt;
                        }
-                       if (!(sacked & TCPCB_SACKED_ACKED))
+                       if (!(sacked & TCPCB_SACKED_ACKED)) {
                                reord = min(pkts_acked, reord);
-                       if (!after(scb->end_seq, tp->high_seq))
-                               flag |= FLAG_ORIG_SACK_ACKED;
+                               if (!after(scb->end_seq, tp->high_seq))
+                                       flag |= FLAG_ORIG_SACK_ACKED;
+                       }
                }
  
                if (sacked & TCPCB_SACKED_ACKED)
@@@ -3246,8 -3211,8 +3247,8 @@@ static void tcp_ack_probe(struct sock *
                 */
        } else {
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 -                                        TCP_RTO_MAX);
 +                                        min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
 +                                        sysctl_tcp_rto_max);
        }
  }
  
@@@ -3323,19 -3288,12 +3324,19 @@@ static void tcp_send_challenge_ack(stru
        static u32 challenge_timestamp;
        static unsigned int challenge_count;
        u32 now = jiffies / HZ;
 +      u32 count;
  
        if (now != challenge_timestamp) {
 +              u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
 +
                challenge_timestamp = now;
 -              challenge_count = 0;
 +              ACCESS_ONCE(challenge_count) = half +
 +                                      reciprocal_divide(prandom_u32(),
 +                                                      sysctl_tcp_challenge_ack_limit);
        }
 -      if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
 +      count = ACCESS_ONCE(challenge_count);
 +      if (count > 0) {
 +              ACCESS_ONCE(challenge_count) = count - 1;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
@@@ -5572,7 -5530,7 +5573,7 @@@ static int tcp_rcv_synsent_state_proces
                        icsk->icsk_ack.lrcvtime = tcp_time_stamp;
                        tcp_enter_quickack_mode(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                                TCP_DELACK_MAX, TCP_RTO_MAX);
 +                                                TCP_DELACK_MAX, sysctl_tcp_rto_max);
  
  discard:
                        __kfree_skb(skb);
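
Note on the tcp_input.c hunks above: the challenge-ACK limit rises from 100 to 1000 per second and, more importantly, the per-second budget is now randomized (half the limit plus a random slice of it) and counted down, so an off-path attacker can no longer probe the shared global counter to infer in-window sequence numbers; this mirrors the upstream hardening of RFC 5961 challenge ACKs. A compilable userspace model of the limiter, where rand() stands in for prandom_u32()/reciprocal_divide():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static unsigned int limit = 1000;        /* sysctl_tcp_challenge_ack_limit */
    static time_t stamp;
    static unsigned int budget;

    static bool may_send_challenge_ack(void)
    {
            time_t now = time(NULL);

            if (now != stamp) {              /* a new one-second window */
                    stamp = now;
                    /* half the limit plus a random slice, as in the hunk */
                    budget = limit / 2 + (unsigned int)(rand() % limit);
            }
            if (budget == 0)
                    return false;
            budget--;
            return true;
    }

    int main(void)
    {
            unsigned int sent = 0;
            int i;

            srand((unsigned int)time(NULL));
            for (i = 0; i < 5000; i++)
                    sent += may_send_challenge_ack();
            printf("challenge ACKs allowed this second: %u\n", sent);
            return 0;
    }
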
diff --combined net/ipv4/tcp_ipv4.c
index 7edf30cdda202876c0c1e3a352a4a84c9674bdbd,7c3eec386a4b90af2011139b8489c8e482d196ee..23f97a0cfc18b9f1d7b951b041e865e65b4590d6
@@@ -233,7 -233,7 +233,7 @@@ int tcp_v4_connect(struct sock *sk, str
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
 -
 +        printk(KERN_INFO "[socket_conn]IPV4 socket[%lu] sport:%u \n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
@@@ -446,7 -446,7 +446,7 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                                remaining, TCP_RTO_MAX);
 +                                                remaining, sysctl_tcp_rto_max);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
@@@ -1426,7 -1426,7 +1426,7 @@@ static int tcp_v4_conn_req_fastopen(str
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 -          TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 +          TCP_TIMEOUT_INIT, sysctl_tcp_rto_max);
  
        /* Add the child socket directly into the accept queue */
        inet_csk_reqsk_queue_add(sk, req, child);
@@@ -1530,7 -1530,6 +1530,7 @@@ int tcp_v4_conn_request(struct sock *sk
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 +      ireq->ir_mark = inet_request_mark(sk, skb);
  
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@@ -1902,7 -1901,7 +1902,7 @@@ void tcp_v4_early_demux(struct sk_buff 
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk->sk_state != TCP_TIME_WAIT) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
  
                        if (dst)
                                dst = dst_check(dst, 0);
@@@ -1952,7 -1951,7 +1952,7 @@@ bool tcp_prequeue(struct sock *sk, stru
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
        return true;
  }
@@@ -2166,7 -2165,6 +2166,7 @@@ static int tcp_v4_init_sock(struct soc
        struct inet_connection_sock *icsk = inet_csk(sk);
  
        tcp_init_sock(sk);
 +        icsk->icsk_MMSRB = 0;
  
        icsk->icsk_af_ops = &ipv4_specific;
  
@@@ -2222,115 -2220,6 +2222,115 @@@ void tcp_v4_destroy_sock(struct sock *s
  }
  EXPORT_SYMBOL(tcp_v4_destroy_sock);
  
 +void tcp_v4_handle_retrans_time_by_uid(struct uid_err uid_e)
 +{
 +    unsigned int bucket;
 +    uid_t skuid = (uid_t)(uid_e.appuid);
 +      struct inet_connection_sock *icsk = NULL;//inet_csk(sk);
 +
 +
 +    for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +        struct hlist_nulls_node *node;
 +        struct sock *sk;
 +        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +    
 +        spin_lock_bh(lock);
 +        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +    
 +            if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                continue;
 +            if (sock_flag(sk, SOCK_DEAD))
 +                continue;
 +    
 +            if(sk->sk_socket){
 +                if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                    continue;
 +                else
 +                    printk("[mmspb] tcp_v4_handle_retrans_time_by_uid socket uid(%d) match!",
 +                        SOCK_INODE(sk->sk_socket)->i_uid);
 +            } else{
 +                continue;
 +          }
 +
 +                sock_hold(sk);
 +                spin_unlock_bh(lock);
 +    
 +                local_bh_disable();
 +                bh_lock_sock(sk);
 +
 +                // update sk time out value
 +              icsk = inet_csk(sk);
 +              printk("[mmspb] tcp_v4_handle_retrans_time_by_uid update timer\n");
 +                                      
 +              sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + 2);
 +              icsk->icsk_rto = sysctl_tcp_rto_min * 30;       
 +              icsk->icsk_MMSRB = 1;
 +                              
 +                bh_unlock_sock(sk);
 +                local_bh_enable();
 +              spin_lock_bh(lock);
 +                sock_put(sk);
 +
 +            }
 +            spin_unlock_bh(lock);
 +        }
 +
 +}
 +
 +
 +/*
 + * tcp_v4_reset_connections_by_uid - destroy all sockets of a specific uid
 + */
 +void tcp_v4_reset_connections_by_uid(struct uid_err uid_e)
 +{
 +    unsigned int bucket;
 +    uid_t skuid = (uid_t)(uid_e.appuid);
 +
 +    for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +        struct hlist_nulls_node *node;
 +        struct sock *sk;
 +        spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +    
 +restart:
 +        spin_lock_bh(lock);
 +        sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +    
 +            if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                continue;
 +            if (sock_flag(sk, SOCK_DEAD))
 +                continue;
 +    
 +            if(sk->sk_socket){
 +                if(SOCK_INODE(sk->sk_socket)->i_uid != skuid)
 +                    continue;
 +                else
 +                    printk(KERN_INFO "SIOCKILLSOCK socket uid(%d) match!",
 +                        SOCK_INODE(sk->sk_socket)->i_uid);
 +            } else{
 +                continue;
 +          }
 +
 +                sock_hold(sk);
 +                spin_unlock_bh(lock);
 +    
 +                local_bh_disable();
 +                bh_lock_sock(sk);
 +                sk->sk_err = uid_e.errNum;
 +                printk(KERN_INFO "SIOCKILLSOCK set sk err == %d!! \n", sk->sk_err);
 +                sk->sk_error_report(sk);
 +    
 +                tcp_done(sk);
 +                bh_unlock_sock(sk);
 +                local_bh_enable();
 +                sock_put(sk);
 +
 +                goto restart;
 +            }
 +            spin_unlock_bh(lock);
 +        }
 +}
 +
 +
  #ifdef CONFIG_PROC_FS
  /* Proc filesystem TCP sock list dumping. */
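
Note on the tcp_ipv4.c hunks above: the MTK additions walk every established-hash bucket looking for sockets owned by a given uid. Because tcp_done() and the error report must not run under the bucket spinlock, the reset path takes a reference, drops the lock, tears the socket down, and restarts the bucket scan from the top. The same drop-lock-and-restart shape in plain pthreads C; struct node and slow_teardown() are hypothetical stand-ins for the hash chain and tcp_done():

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; int uid; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static void slow_teardown(struct node *n)
    {
            free(n);                         /* stands in for tcp_done() */
    }

    static void reset_by_uid(int uid)
    {
    restart:
            pthread_mutex_lock(&list_lock);
            for (struct node **pp = &head; *pp; pp = &(*pp)->next) {
                    struct node *n = *pp;

                    if (n->uid != uid)
                            continue;
                    *pp = n->next;           /* unlink while locked */
                    pthread_mutex_unlock(&list_lock);
                    slow_teardown(n);        /* must not hold the lock */
                    goto restart;            /* the list may have changed */
            }
            pthread_mutex_unlock(&list_lock);
    }

    int main(void) { reset_by_uid(1000); return 0; }
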
  
diff --combined net/ipv4/tcp_output.c
index 1f3d2747ec45645f1b3d187b710ffe0a1fd86bf3,7681a1bbd97f5d8f8c4155da5d83863e59ba3045..05cce35fcc582dc4e5b07d44dbc95c53021fe6e4
@@@ -231,13 -231,14 +231,13 @@@ void tcp_select_initial_window(int __sp
        }
  
        /* Set initial window to a value enough for senders starting with
 -       * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 +       * initial congestion window of sysctl_tcp_default_init_rwnd. Place
         * a limit on the initial window when mss is larger than 1460.
         */
        if (mss > (1 << *rcv_wscale)) {
 -              int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 +              int init_cwnd = sysctl_tcp_default_init_rwnd;
                if (mss > 1460)
 -                      init_cwnd =
 -                      max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 +                      init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
@@@ -1993,7 -1994,7 +1993,7 @@@ bool tcp_schedule_loss_probe(struct soc
        }
  
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
        return true;
  }
  
@@@ -2044,7 -2045,7 +2044,7 @@@ void tcp_send_loss_probe(struct sock *s
  rearm_timer:
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  inet_csk(sk)->icsk_rto,
 -                                TCP_RTO_MAX);
 +                                sysctl_tcp_rto_max);
  
        if (likely(!err))
                NET_INC_STATS_BH(sock_net(sk),
@@@ -2566,7 -2567,7 +2566,7 @@@ begin_fwd
                if (skb == tcp_write_queue_head(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
 -                                                TCP_RTO_MAX);
 +                                                sysctl_tcp_rto_max);
        }
  }
  
@@@ -2771,6 -2772,8 +2771,8 @@@ struct sk_buff *tcp_make_synack(struct 
        }
  #endif
  
+       /* Do not fool tcpdump (if any), clean our debris */
+       skb->tstamp.tv64 = 0;
        return skb;
  }
  EXPORT_SYMBOL(tcp_make_synack);
@@@ -2987,7 -2990,7 +2989,7 @@@ int tcp_connect(struct sock *sk
  
        /* Timer for repeating the SYN until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 -                                inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 +                                inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
        return 0;
  }
  EXPORT_SYMBOL(tcp_connect);
@@@ -3066,7 -3069,7 +3068,7 @@@ void tcp_send_ack(struct sock *sk
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                        TCP_DELACK_MAX, TCP_RTO_MAX);
 +                                        TCP_DELACK_MAX, sysctl_tcp_rto_max);
                return;
        }
  
@@@ -3186,8 -3189,8 +3188,8 @@@ void tcp_send_probe0(struct sock *sk
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 -                                        min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 -                                        TCP_RTO_MAX);
 +                                        min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
 +                                        sysctl_tcp_rto_max);
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember icsk_probes_out.
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          min(icsk->icsk_rto << icsk->icsk_backoff,
                                              TCP_RESOURCE_PROBE_INTERVAL),
 -                                        TCP_RTO_MAX);
 +                                        sysctl_tcp_rto_max);
        }
  }
diff --combined net/ipv6/ndisc.c
index d1bc9987b8dc2c7e1647aa71e9173a110c041ed7,05f361338c2eabd0d27449cd8d85a3c29e19046d..42c40b119c8995e47366722db891b679e9d2c50f
@@@ -1196,10 -1196,11 +1196,11 @@@ static void ndisc_router_discovery(stru
                /* Only set hop_limit on the interface if it is higher than
                 * the current hop_limit.
                 */
-               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit)
+               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
                        in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
-               else
+               } else {
                        ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+               }
                if (rt)
                        dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
                                       ra_msg->icmph.icmp6_hop_limit);
@@@ -1331,35 -1332,12 +1332,35 @@@ skip_routeinfo
                }
        }
  
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +      if (in6_dev->if_flags & IF_RA_OTHERCONF) {
 +              printk(KERN_INFO "[mtk_net][ipv6]receive RA with O bit!\n");
 +              in6_dev->cnf.ra_info_flag = 1;
 +      }
 +      if (in6_dev->if_flags & IF_RA_MANAGED) {
 +              printk(KERN_INFO "[mtk_net][ipv6]receive RA with M bit!\n");
 +              in6_dev->cnf.ra_info_flag = 2;
 +      }
 +      if (in6_dev->cnf.ra_info_flag == 0) {
 +              printk(KERN_INFO "[mtk_net][ipv6]receive RA, neither O nor M bit set!\n");
 +              in6_dev->cnf.ra_info_flag = 4;
 +      }
 +#endif
 +
        if (ndopts.nd_useropts) {
                struct nd_opt_hdr *p;
                for (p = ndopts.nd_useropts;
                     p;
                     p = ndisc_next_useropt(p, ndopts.nd_useropts_end)) {
                        ndisc_ra_useropt(skb, p);
 +#ifdef CONFIG_MTK_DHCPV6C_WIFI
 +                      /* only clear ra_info_flag when O bit is set */
 +                      if (p->nd_opt_type == ND_OPT_RDNSS &&
 +                          (in6_dev->if_flags & IF_RA_OTHERCONF)) {
 +                              printk(KERN_INFO "[mtk_net][ipv6]RDNSS with O bit set, clear ra_info_flag!\n");
 +                              in6_dev->cnf.ra_info_flag = 0;
 +                      }
 +#endif
                }
        }
  
@@@ -1571,7 -1549,7 +1572,7 @@@ int ndisc_rcv(struct sk_buff *skb
        }
  
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 -
 +      
        switch (msg->icmph.icmp6_type) {
        case NDISC_NEIGHBOUR_SOLICITATION:
                ndisc_recv_ns(skb);
diff --combined net/ipv6/tcp_ipv6.c
index c27303d4482fdc0264accfd622c5a1b62597ee7c,4659b8ab55d98176a23009638d7eeb634467e5ca..49b8729204e1f570ad16a560d0ddeffd4bb059d3
@@@ -252,7 -252,6 +252,7 @@@ static int tcp_v6_connect(struct sock *
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
 +      fl6.flowi6_uid = sock_i_uid(sk);
  
        final_p = fl6_update_dst(&fl6, np->opt, &final);
  
        if (err)
                goto late_failure;
  
 +      printk(KERN_INFO "net_sock, IPV6 socket[%lu] sport:%u\n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
@@@ -793,7 -791,6 +793,7 @@@ static void tcp_v6_send_response(struc
        fl6.flowi6_proto = IPPROTO_TCP;
        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                fl6.flowi6_oif = inet6_iif(skb);
 +      fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@@ -1002,7 -999,6 +1002,7 @@@ static int tcp_v6_conn_request(struct s
                TCP_ECN_create_request(req, skb, sock_net(sk));
  
        treq->iif = sk->sk_bound_dev_if;
 +      inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
  
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
@@@ -1620,7 -1616,7 +1620,7 @@@ static void tcp_v6_early_demux(struct s
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk->sk_state != TCP_TIME_WAIT) {
-                       struct dst_entry *dst = sk->sk_rx_dst;
+                       struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
  
                        if (dst)
                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);