Merge tag 'v3.10.87' into update
author Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:47:22 +0000 (22:47 +0100)
committer Stricted <info@stricted.net>
Wed, 21 Mar 2018 21:47:22 +0000 (22:47 +0100)
This is the 3.10.87 stable release

Makefile
arch/arm/kernel/entry-armv.S
arch/arm64/kernel/signal32.c
drivers/md/md.c
drivers/usb/host/xhci-ring.c
kernel/signal.c
mm/vmscan.c

diff --combined Makefile
index 8950dd7ebd85c5ca7e57d505ffd713b66b10b0b1,0d4fd64273491a966936d731d461e1dd0b5b04af..ca112066f1550c52a179f757736506ceedf03d4c
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 86
+ SUBLEVEL = 87
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -374,7 -374,7 +374,7 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks \
 -                 -std=gnu89
 +                 -w -std=gnu89
  
  KBUILD_AFLAGS_KERNEL :=
  KBUILD_CFLAGS_KERNEL :=
diff --combined arch/arm/kernel/entry-armv.S
index 925a62e236513082afe62885af4be9cad1565dff,03a1e26ba3a35bc806610dd425115657707fc32a..1d3968c693e625fd5270aa26d893af59d13cb68a
@@@ -358,7 -358,8 +358,8 @@@ ENDPROC(__pabt_svc
        .endm
  
        .macro  kuser_cmpxchg_check
- #if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+ #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+     !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
  #ifndef CONFIG_MMU
  #warning "NPTL on non MMU needs fixing"
  #else
@@@ -685,29 -686,8 +686,29 @@@ ENDPROC(ret_from_exception
  ENTRY(__switch_to)
   UNWIND(.fnstart      )
   UNWIND(.cantunwind   )
 +#ifdef CONFIG_VFP_OPT
 +      add     ip, r1, #TI_CPU_SAVE
 +      stmfa   ip!, {r0, r1, r2, r5, r6, r8, lr}
 +
 +      @1. save vfp state for previous thread_info
 +      mov     r0, r1
 +      add     r0, r0, #TI_VFPSTATE    @ r0 = workspace
 +      VFPFMRX r1, FPEXC
 +      mov     r5, ip                  @ save ip to r5, because vfp_save_state may change ip
 +      mov     r6, r2                  @ save r2 to r6, because vfp_save_state may change r2
 +      bl      vfp_save_state
 +      mov     ip, r5
 +      mov     r2, r6
 +
 +      @ 2. restore vfp state from next thread_info
 +      add     r2, r2, #TI_VFPSTATE    @ r2 = workspace
 +      VFPFLDMIA       r2, r0          @ reload the working registers while
 +                                      @ FPEXC is in a safe state
 +      ldmia   r2, {r1, r5, r6, r8}    @ load FPEXC, FPSCR, FPINST, FPINST2
 +      VFPFMXR FPSCR, r5               @ restore status
 +      ldmfa   ip!, {r0, r1, r2, r5, r6, r8, lr}
 +#endif
        add     ip, r1, #TI_CPU_SAVE
 -      ldr     r3, [r2, #TI_TP_VALUE]
   ARM( stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
   THUMB(       stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
   THUMB(       str     sp, [ip], #4               )
  #ifdef CONFIG_CPU_USE_DOMAINS
        ldr     r6, [r2, #TI_CPU_DOMAIN]
  #endif
 -      set_tls r3, r4, r5
 +      switch_tls r1, r2, r4, r5, r3, r7
  #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        ldr     r7, [r2, #TI_TASK]
        ldr     r8, =__stack_chk_guard
diff --combined arch/arm64/kernel/signal32.c
index 36db9874c0b7e4cf46039e6b58980a90854701da,b9564b8d6bab4bb3132c5aa1f2615208f873cb6e..6cf01cef31fba9b115b69605a7e6dce486bc2cce
@@@ -26,7 -26,7 +26,7 @@@
  #include <asm/fpsimd.h>
  #include <asm/signal32.h>
  #include <asm/uaccess.h>
 -#include <asm/unistd32.h>
 +#include <asm/unistd.h>
  
  struct compat_sigcontext {
        /* We always set these two fields to 0 */
@@@ -193,7 -193,8 +193,8 @@@ int copy_siginfo_to_user32(compat_sigin
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitely for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
  #endif
                break;
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_int, &to->si_int);
                break;
 +#ifdef __ARCH_SIGSYS
 +      case __SI_SYS:
 +              err |= __put_user((compat_uptr_t)(unsigned long)
 +                              from->si_call_addr, &to->si_call_addr);
 +              err |= __put_user(from->si_syscall, &to->si_syscall);
 +              err |= __put_user(from->si_arch, &to->si_arch);
 +              break;
 +#endif
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
  
  int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
  {
-       memset(to, 0, sizeof *to);
        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
@@@ -254,7 -245,7 +253,7 @@@ static int compat_preserve_vfp_context(
         * Note that this also saves V16-31, which aren't visible
         * in AArch32.
         */
 -      fpsimd_save_state(fpsimd);
 +      fpsimd_preserve_current_state();
  
        /* Place structure header on the stack */
        __put_user_error(magic, &frame->magic, err);
@@@ -317,8 -308,11 +316,8 @@@ static int compat_restore_vfp_context(s
         * We don't need to touch the exception register, so
         * reload the hardware state.
         */
 -      if (!err) {
 -              preempt_disable();
 -              fpsimd_load_state(&fpsimd);
 -              preempt_enable();
 -      }
 +      if (!err)
 +              fpsimd_update_current_state(&fpsimd);
  
        return err ? -EFAULT : 0;
  }
diff --combined drivers/md/md.c
index a07668b2e68bb5b11620d115bbc693b83c4ed6a4,37ff00d014b42761aa2e5b1b2d4016380c18032c..ed0c6a6b79818fef2fe0d818e16c810270e73020
@@@ -33,7 -33,6 +33,7 @@@
  */
  
  #include <linux/kthread.h>
 +#include <linux/freezer.h>
  #include <linux/blkdev.h>
  #include <linux/sysctl.h>
  #include <linux/seq_file.h>
@@@ -5629,9 -5628,9 +5629,9 @@@ static int get_bitmap_file(struct mdde
        int err = -ENOMEM;
  
        if (md_allow_write(mddev))
-               file = kmalloc(sizeof(*file), GFP_NOIO);
+               file = kzalloc(sizeof(*file), GFP_NOIO);
        else
-               file = kmalloc(sizeof(*file), GFP_KERNEL);
+               file = kzalloc(sizeof(*file), GFP_KERNEL);
  
        if (!file)
                goto out;
@@@ -7372,14 -7371,11 +7372,14 @@@ void md_do_sync(struct md_thread *threa
         *
         */
  
 +      set_freezable();
 +
        do {
                mddev->curr_resync = 2;
  
        try_again:
 -              if (kthread_should_stop())
 +
 +              if (kthread_freezable_should_stop(NULL))
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
 +
 +                              try_to_freeze();
 +
                                /* We need to wait 'interruptible' so as not to
                                 * contribute to the load average, and not to
                                 * be caught by 'softlockup'
                                               " share one or more physical units)\n",
                                               desc, mdname(mddev), mdname(mddev2));
                                        mddev_put(mddev2);
 +                                      try_to_freeze();
                                        if (signal_pending(current))
                                                flush_signals(current);
                                        schedule();
                                                 || kthread_should_stop());
                }
  
 -              if (kthread_should_stop())
 +              if (kthread_freezable_should_stop(NULL))
                        goto interrupted;
  
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
                        last_mark = next;
                }
  
 -
 -              if (kthread_should_stop())
 +              if (kthread_freezable_should_stop(NULL))
                        goto interrupted;
  
  
@@@ -7768,10 -7761,8 +7768,10 @@@ no_add
   */
  void md_check_recovery(struct mddev *mddev)
  {
 -      if (mddev->suspended)
 +#ifdef CONFIG_FREEZER
 +      if (mddev->suspended || unlikely(atomic_read(&system_freezing_cnt)))
                return;
 +#endif
  
        if (mddev->bitmap)
                bitmap_daemon_work(mddev);
diff --combined drivers/usb/host/xhci-ring.c
index 11e6e52c0c97d4d10f7f11e45fb8c25dd7fbf216,fde0277adc2c4884a5cbbc28f7480780c6cd1a06..d52c1653ce658f0d7cf60a467044d58ebcdd3de5
@@@ -85,7 -85,7 +85,7 @@@ dma_addr_t xhci_trb_virt_to_dma(struct 
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
-       if (segment_offset > TRBS_PER_SEGMENT)
+       if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
  }
@@@ -242,13 -242,9 +242,13 @@@ static void inc_enq(struct xhci_hcd *xh
                         * carry over the chain bit of the previous TRB
                         * (which may mean the chain bit is cleared).
                         */
 +                      #ifdef CONFIG_MTK_XHCI
 +                      if (!xhci_link_trb_quirk(xhci)) {
 +                      #else
                        if (!(ring->type == TYPE_ISOC &&
                                        (xhci->quirks & XHCI_AMD_0x96_HOST))
                                                && !xhci_link_trb_quirk(xhci)) {
 +                      #endif
                                next->link.control &=
                                        cpu_to_le32(~TRB_CHAIN);
                                next->link.control |=
  static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
                unsigned int num_trbs)
  {
 +#ifndef CONFIG_MTK_XHCI
        int num_trbs_in_deq_seg;
 +#endif
  
        if (ring->num_trbs_free < num_trbs)
                return 0;
  
 +#ifndef CONFIG_MTK_XHCI
        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
                        return 0;
        }
 +#endif
  
        return 1;
  }
@@@ -738,12 -730,10 +738,12 @@@ static void xhci_giveback_urb_in_irq(st
        if (urb_priv->td_cnt == urb_priv->length) {
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 +                      #ifndef CONFIG_MTK_XHCI
                        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                        usb_amd_quirk_pll_enable();
                        }
 +                      #endif
                }
                usb_hcd_unlink_urb_from_ep(hcd, urb);
  
@@@ -1992,13 -1982,11 +1992,13 @@@ td_cleanup
                        ret = 1;
                        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 +                              #ifndef CONFIG_MTK_XHCI
                                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
                                        == 0) {
                                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                                                usb_amd_quirk_pll_enable();
                                }
 +                              #endif
                        }
                }
        }
@@@ -2570,7 -2558,7 +2570,7 @@@ static int handle_tx_event(struct xhci_
                                 * successful event after a short transfer.
                                 * Ignore it.
                                 */
 -                              if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
 +                              if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
                                                ep_ring->last_td_was_short) {
                                        ep_ring->last_td_was_short = false;
                                        ret = 0;
@@@ -2939,16 -2927,13 +2939,16 @@@ static int prepare_ring(struct xhci_hc
                        /* If we're not dealing with 0.95 hardware or isoc rings
                         * on AMD 0.96 host, clear the chain bit.
                         */
 +                      #ifndef CONFIG_MTK_XHCI
                        if (!xhci_link_trb_quirk(xhci) &&
                                        !(ring->type == TYPE_ISOC &&
                                         (xhci->quirks & XHCI_AMD_0x96_HOST)))
                                next->link.control &= cpu_to_le32(~TRB_CHAIN);
                        else
                                next->link.control |= cpu_to_le32(TRB_CHAIN);
 -
 +                      #else
 +                      next->link.control &= cpu_to_le32(~TRB_CHAIN);
 +                      #endif
                        wmb();
                        next->link.control ^= cpu_to_le32(TRB_CYCLE);
  
@@@ -3127,29 -3112,6 +3127,29 @@@ int xhci_queue_intr_tx(struct xhci_hcd 
   * right shifted by 10.
   * It must fit in bits 21:17, so it can't be bigger than 31.
   */
 +#ifdef CONFIG_MTK_XHCI
 +static u32 xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total
 +      , unsigned int maxp, unsigned trb_buffer_length)
 +{
 +      u32 max = 31;
 +      int remainder, td_packet_count, packet_transferred;
 +
 +      //0 for the last TRB
 +      //FIXME: need to workaround if there is ZLP in this TD
 +      if (td_running_total + trb_buffer_length == td_transfer_size)
 +              return 0;
 +
 +      //FIXME: need to take care of high-bandwidth (MAX_ESIT)
 +      packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp;
 +      td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp);
 +      remainder = td_packet_count - packet_transferred;
 +
 +      if (remainder > max)
 +              return max << 17;
 +      else
 +              return remainder << 17;
 +}
 +#else
  static u32 xhci_td_remainder(unsigned int remainder)
  {
        u32 max = (1 << (21 - 17 + 1)) - 1;
        else
                return (remainder >> 10) << 17;
  }
 +#endif
 +
  
 +#ifndef CONFIG_MTK_XHCI
  /*
   * For xHCI 1.0 host controllers, TD size is the number of max packet sized
   * packets remaining in the TD (*not* including this TRB).
@@@ -3198,7 -3157,6 +3198,7 @@@ static u32 xhci_v1_0_td_remainder(int r
                return 31 << 17;
        return (total_packet_count - packets_transferred) << 17;
  }
 +#endif
  
  static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
                                        (unsigned int) addr + trb_buff_len);
                }
  
 +              /* Set the TRB length, TD size, and interrupter fields. */
 +              #ifdef CONFIG_MTK_XHCI
 +              if(num_trbs >1){
 +                      remainder = xhci_td_remainder(urb->transfer_buffer_length,
 +                              running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len);
 +              }
 +              #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
 +              #endif
 +
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@@ -3377,9 -3326,7 +3377,9 @@@ int xhci_queue_bulk_tx(struct xhci_hcd 
        bool more_trbs_coming;
        int start_cycle;
        u32 field, length_field;
 -
 +#ifdef CONFIG_MTK_XHCI
 +      int max_packet = USB_SPEED_HIGH;
 +#endif
        int running_total, trb_buff_len, ret;
        unsigned int total_packet_count;
        u64 addr;
        }
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
  
 +#ifdef CONFIG_MTK_XHCI
 +      switch(urb->dev->speed){
 +              case USB_SPEED_SUPER:
 +                      max_packet = urb->ep->desc.wMaxPacketSize;
 +                      break;
 +              case USB_SPEED_HIGH:
 +              case USB_SPEED_FULL:
 +              case USB_SPEED_LOW:
 +        default:
 +                      max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
 +                      break;
 +      }
 +      if((urb->transfer_flags & URB_ZERO_PACKET)
 +              && ((urb->transfer_buffer_length % max_packet) == 0)){
 +              num_trbs++;
 +      }
 +#endif
 +
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
                /* Only set interrupt on short packet for IN endpoints */
                if (usb_urb_dir_in(urb))
                        field |= TRB_ISP;
 -
 +              #ifdef CONFIG_MTK_XHCI
 +              remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
 +              #else
                /* Set the TRB length, TD size, and interrupter fields. */
                if (xhci->hci_version < 0x100) {
                        remainder = xhci_td_remainder(
                                        trb_buff_len, total_packet_count, urb,
                                        num_trbs - 1);
                }
 +              #endif
                length_field = TRB_LEN(trb_buff_len) |
                        remainder |
                        TRB_INTR_TARGET(0);
@@@ -3586,11 -3512,7 +3586,11 @@@ int xhci_queue_ctrl_tx(struct xhci_hcd 
                field |= 0x1;
  
        /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
 +#ifdef CONFIG_MTK_XHCI
 +      if(1){
 +#else
        if (xhci->hci_version == 0x100) {
 +#endif
                if (urb->transfer_buffer_length > 0) {
                        if (setup->bRequestType & USB_DIR_IN)
                                field |= TRB_TX_TYPE(TRB_DATA_IN);
                field = TRB_TYPE(TRB_DATA);
  
        length_field = TRB_LEN(urb->transfer_buffer_length) |
 +      #ifdef CONFIG_MTK_XHCI
 +              //CC: MTK style, no scatter-gather for control transfer
 +              0 |
 +      #else
                xhci_td_remainder(urb->transfer_buffer_length) |
 +      #endif
                TRB_INTR_TARGET(0);
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
@@@ -3742,9 -3659,6 +3742,9 @@@ static int xhci_queue_isoc_tx(struct xh
        u64 start_addr, addr;
        int i, j;
        bool more_trbs_coming;
 +#ifdef CONFIG_MTK_XHCI
 +      int max_packet = USB_SPEED_HIGH;
 +#endif
  
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;
  
 +#ifdef CONFIG_MTK_XHCI
 +      switch(urb->dev->speed){
 +              case USB_SPEED_SUPER:
 +                      max_packet = urb->ep->desc.wMaxPacketSize;
 +                      break;
 +              case USB_SPEED_HIGH:
 +              case USB_SPEED_FULL:
 +              case USB_SPEED_LOW:
 +        default:
 +                      max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff;
 +                      break;
 +      }
 +#endif
        urb_priv = urb->hcpriv;
        /* Queue the first TRB, even if it's zero-length */
        for (i = 0; i < num_tds; i++) {
                                trb_buff_len = td_remain_len;
  
                        /* Set the TRB length, TD size, & interrupter fields. */
 +                      #ifdef CONFIG_MTK_XHCI
 +                      remainder = xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len);
 +                      #else
                        if (xhci->hci_version < 0x100) {
                                remainder = xhci_td_remainder(
                                                td_len - running_total);
                                                total_packet_count, urb,
                                                (trbs_per_td - j - 1));
                        }
 +                      #endif
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);
                        goto cleanup;
                }
        }
 -
 +      #ifndef CONFIG_MTK_XHCI
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                        usb_amd_quirk_pll_disable();
        }
 +      #endif
        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
  
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
diff --combined kernel/signal.c
index 6217aa47d7e1e2a6883012cab0f8b543a5dbfabe,2e51bcbea1e3f9a3d1eef2140e1087d636d3fde4..e8d3c3722ce1fda122ed75f3326ebda5959cbe91
@@@ -861,10 -861,8 +861,10 @@@ static bool prepare_signal(int sig, str
        struct task_struct *t;
  
        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
 -              if (signal->flags & SIGNAL_GROUP_COREDUMP)
 -                      return sig == SIGKILL;
 +              if (signal->flags & SIGNAL_GROUP_COREDUMP) {
 +                      printk(KERN_DEBUG "[%d:%s] is in the middle of dying so skip sig %d\n",p->pid, p->comm, sig);
 +              }
 +              return 0;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
@@@ -1045,8 -1043,6 +1045,8 @@@ static inline void userns_fixup_signal_
  }
  #endif
  
 +static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 +
  static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
  {
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;
 +      unsigned state;
  
 +      state = t->state ? __ffs(t->state) + 1 : 0;
 +      printk(KERN_DEBUG "[%d:%s] sig %d to [%d:%s] stat=%c\n",
 +             current->pid, current->comm, sig, t->pid, t->comm,
 +             state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
        assert_spin_locked(&t->sighand->siglock);
  
        result = TRACE_SIGNAL_IGNORED;
@@@ -2777,7 -2768,8 +2777,8 @@@ int copy_siginfo_to_user(siginfo_t __us
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
  #endif
                break;
@@@ -2857,7 -2849,7 +2858,7 @@@ int do_sigtimedwait(const sigset_t *whi
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);
  
 -              timeout = schedule_timeout_interruptible(timeout);
 +              timeout = freezable_schedule_timeout_interruptible(timeout);
  
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
@@@ -3044,7 -3036,7 +3045,7 @@@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
  {
-       siginfo_t info;
+       siginfo_t info = {};
        int ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;
@@@ -3090,7 -3082,7 +3091,7 @@@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinf
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
  {
-       siginfo_t info;
+       siginfo_t info = {};
  
        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
diff --combined mm/vmscan.c
index 1dcef0ceed212f236a09cee1f6d729ed79b98c99,233f0011f7680d99a2726db7a77fe19fcdd655b4..bf4921af2170ccce73fd8404959af657598f04eb
@@@ -43,7 -43,6 +43,7 @@@
  #include <linux/sysctl.h>
  #include <linux/oom.h>
  #include <linux/prefetch.h>
 +#include <linux/debugfs.h>
  
  #include <asm/tlbflush.h>
  #include <asm/div64.h>
@@@ -156,39 -155,6 +156,39 @@@ static unsigned long get_lru_size(struc
        return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
  }
  
 +struct dentry *debug_file;
 +
 +static int debug_shrinker_show(struct seq_file *s, void *unused)
 +{
 +      struct shrinker *shrinker;
 +      struct shrink_control sc;
 +
 +      sc.gfp_mask = -1;
 +      sc.nr_to_scan = 0;
 +
 +      down_read(&shrinker_rwsem);
 +      list_for_each_entry(shrinker, &shrinker_list, list) {
 +              int num_objs;
 +
 +              num_objs = shrinker->shrink(shrinker, &sc);
 +              seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
 +      }
 +      up_read(&shrinker_rwsem);
 +      return 0;
 +}
 +
 +static int debug_shrinker_open(struct inode *inode, struct file *file)
 +{
 +        return single_open(file, debug_shrinker_show, inode->i_private);
 +}
 +
 +static const struct file_operations debug_shrinker_fops = {
 +        .open = debug_shrinker_open,
 +        .read = seq_read,
 +        .llseek = seq_lseek,
 +        .release = single_release,
 +};
 +
  /*
   * Add a shrinker callback to be called from the vm
   */
@@@ -201,15 -167,6 +201,15 @@@ void register_shrinker(struct shrinker 
  }
  EXPORT_SYMBOL(register_shrinker);
  
 +static int __init add_shrinker_debug(void)
 +{
 +      debugfs_create_file("shrinker", 0644, NULL, NULL,
 +                          &debug_shrinker_fops);
 +      return 0;
 +}
 +
 +late_initcall(add_shrinker_debug);
 +
  /*
   * Remove one
   */
@@@ -773,20 -730,15 +773,15 @@@ static unsigned long shrink_page_list(s
                         * could easily OOM just because too many pages are in
                         * writeback and there is nothing else to reclaim.
                         *
-                        * Check __GFP_IO, certainly because a loop driver
+                        * Require may_enter_fs to wait on writeback, because
+                        * fs may not have submitted IO yet. And a loop driver
                         * thread might enter reclaim, and deadlock if it waits
                         * on a page for which it is needed to do the write
                         * (loop masks off __GFP_IO|__GFP_FS for this reason);
                         * but more thought would probably show more reasons.
-                        *
-                        * Don't require __GFP_FS, since we're not going into
-                        * the FS, just waiting on its writeback completion.
-                        * Worryingly, ext4 gfs2 and xfs allocate pages with
-                        * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
-                        * testing may_enter_fs here is liable to OOM on them.
                         */
                        if (global_reclaim(sc) ||
-                           !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+                           !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
@@@ -1234,7 -1186,7 +1229,7 @@@ static int too_many_isolated(struct zon
  {
        unsigned long inactive, isolated;
  
 -      if (current_is_kswapd())
 +      if (current_is_kswapd() || sc->hibernation_mode)
                return 0;
  
        if (!global_reclaim(sc))
@@@ -1684,42 -1636,6 +1679,42 @@@ enum scan_balance 
        SCAN_FILE,
  };
  
 +
 +#ifdef CONFIG_ZRAM
 +static int vmscan_swap_file_ratio = 1;
 +module_param_named(swap_file_ratio, vmscan_swap_file_ratio, int, S_IRUGO | S_IWUSR);
 +
 +#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
 +
 +// vmscan debug
 +static int vmscan_swap_sum = 200;
 +module_param_named(swap_sum, vmscan_swap_sum, int, S_IRUGO | S_IWUSR);
 +
 +
 +static int vmscan_scan_file_sum = 0;
 +static int vmscan_scan_anon_sum = 0;
 +static int vmscan_recent_scanned_anon = 0;
 +static int vmscan_recent_scanned_file = 0;
 +static int vmscan_recent_rotated_anon = 0;
 +static int vmscan_recent_rotated_file = 0;
 +module_param_named(scan_file_sum, vmscan_scan_file_sum, int, S_IRUGO);
 +module_param_named(scan_anon_sum, vmscan_scan_anon_sum, int, S_IRUGO);
 +module_param_named(recent_scanned_anon, vmscan_recent_scanned_anon, int, S_IRUGO);
 +module_param_named(recent_scanned_file, vmscan_recent_scanned_file, int, S_IRUGO);
 +module_param_named(recent_rotated_anon, vmscan_recent_rotated_anon, int, S_IRUGO);
 +module_param_named(recent_rotated_file, vmscan_recent_rotated_file, int, S_IRUGO);
 +#endif // CONFIG_ZRAM
 +
 +
 +#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
 +//#define LOGTAG "VMSCAN"
 +static unsigned long t=0;
 +static unsigned long history[2] = {0};
 +extern int lowmem_minfree[9];
 +#endif
 +
 +#endif // CONFIG_ZRAM
 +
  /*
   * Determine how aggressively the anon and file LRU lists should be
   * scanned.  The relative value of each set of LRU lists is determined
@@@ -1742,11 -1658,6 +1737,11 @@@ static void get_scan_count(struct lruve
        bool force_scan = false;
        unsigned long ap, fp;
        enum lru_list lru;
 +#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
 +      int cpu;
 +      unsigned long SwapinCount, SwapoutCount, cached;
 +      bool bThrashing = false;
 +#endif
  
        /*
         * If the zone or memcg is small, nr[l] can be 0.  This
        anon_prio = vmscan_swappiness(sc);
        file_prio = 200 - anon_prio;
  
 +      /*
 +       * With swappiness at 100, anonymous and file have the same priority.
 +       * This scanning priority is essentially the inverse of IO cost.
 +       */
 +#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
 +    if (vmscan_swap_file_ratio) {
 +
 +              if(t == 0)
 +                      t = jiffies;
 +
 +              if (time_after(jiffies, t + 1 * HZ)) {
 +              
 +                      for_each_online_cpu(cpu) {
 +                              struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 +                              SwapinCount     += this->event[PSWPIN];
 +                              SwapoutCount    += this->event[PSWPOUT];
 +                      }
 +
 +                      if( ((SwapinCount-history[0] + SwapoutCount - history[1]) / (jiffies-t) * HZ) > 3000){
 +                              bThrashing = true;
 +                              //xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "!!! thrashing !!!\n");
 +                      }else{
 +                              bThrashing = false;
 +                              //xlog_printk(ANDROID_LOG_WARN, LOGTAG, "!!! NO thrashing !!!\n");
 +                      }
 +                      history[0] = SwapinCount;
 +                      history[1] = SwapoutCount;
 +
 +
 +                      t=jiffies;
 +              }
 +
 +
 +              if(!bThrashing){
 +                      anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
 +                      file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
 +                      //xlog_printk(ANDROID_LOG_DEBUG, LOGTAG, "1 anon_prio: %d, file_prio: %d \n",  anon_prio, file_prio);
 +
 +              } else {
 +                      cached = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM) - total_swapcache_pages();
 +                      if(cached > lowmem_minfree[2]) {
 +                              anon_prio = vmscan_swappiness(sc);
 +                              file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
 +                              //xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "2 anon_prio: %d, file_prio: %d \n",  anon_prio, file_prio);
 +                      } else {
 +                              anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
 +                              file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
 +                              //xlog_printk(ANDROID_LOG_ERROR, LOGTAG, "3 anon_prio: %d, file_prio: %d \n",  anon_prio, file_prio);
 +                      }
 +              }
 +
 +      } else {
 +          anon_prio = vmscan_swappiness(sc);
 +          file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
 +    }
 +#elif defined(CONFIG_ZRAM) // CONFIG_ZRAM
 +      if (vmscan_swap_file_ratio) {
 +          anon_prio = anon_prio * anon / (anon + file + 1);
 +          file_prio = file_prio * file / (anon + file + 1);
 +      }
 +#endif // CONFIG_ZRAM
 +
 +
 +
        /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
@@@ -2229,7 -2076,7 +2224,7 @@@ static bool shrink_zones(struct zonelis
                        if (zone->all_unreclaimable &&
                                        sc->priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
 -                      if (IS_ENABLED(CONFIG_COMPACTION)) {
 +                      if (IS_ENABLED(CONFIG_COMPACTION) && !sc->hibernation_mode) {
                                /*
                                 * If we already have plenty of memory free for
                                 * compaction in this zone, don't free any more.
@@@ -2331,11 -2178,6 +2326,11 @@@ static unsigned long do_try_to_free_pag
        unsigned long writeback_threshold;
        bool aborted_reclaim;
  
 +#ifdef CONFIG_FREEZER
 +      if (unlikely(pm_freezing && !sc->hibernation_mode))
 +              return 0;
 +#endif
 +
        delayacct_freepages_start();
  
        if (global_reclaim(sc))
@@@ -3262,11 -3104,6 +3257,11 @@@ void wakeup_kswapd(struct zone *zone, i
        if (!populated_zone(zone))
                return;
  
 +#ifdef CONFIG_FREEZER
 +      if (pm_freezing)
 +              return;
 +#endif
 +
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        pgdat = zone->zone_pgdat;
   * LRU order by reclaiming preferentially
   * inactive > active > active referenced > active mapped
   */
 -unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 +unsigned long shrink_memory_mask(unsigned long nr_to_reclaim, gfp_t mask)
  {
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
 -              .gfp_mask = GFP_HIGHUSER_MOVABLE,
 +              .gfp_mask = mask,
                .may_swap = 1,
                .may_unmap = 1,
                .may_writepage = 1,
  
        return nr_reclaimed;
  }
 +EXPORT_SYMBOL_GPL(shrink_memory_mask);
 +
 +#ifdef CONFIG_MTKPASR
 +extern void shrink_mtkpasr_all(void);
 +#else
 +#define shrink_mtkpasr_all()  do {} while (0)
 +#endif
 +unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 +{
 +      shrink_mtkpasr_all();
 +      return shrink_memory_mask(nr_to_reclaim, GFP_HIGHUSER_MOVABLE);
 +}
 +EXPORT_SYMBOL_GPL(shrink_all_memory);
  #endif /* CONFIG_HIBERNATION */
  
  /* It's optimal to keep kswapds on the same CPUs as their memory, but
@@@ -3758,283 -3582,3 +3753,283 @@@ void scan_unevictable_unregister_node(s
        device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
  }
  #endif
 +
 +#ifdef CONFIG_MTKPASR
 +void try_to_shrink_slab(void)
 +{
 +      struct shrinker *shrinker;
 +      struct shrink_control shrink = {
 +              .gfp_mask = GFP_KERNEL|__GFP_HIGHMEM,
 +      };
 +
 +      if (!down_read_trylock(&shrinker_rwsem)) {
 +              return;
 +      }
 +      
 +      list_for_each_entry(shrinker, &shrinker_list, list) {
 +              int num_objs;
 +              int shrink_ret = 0;
 +              int retry = 2;
 +              
 +              num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
 +              if (num_objs <= 0)
 +                      continue;
 +
 +              do {
 +                      /* To shrink */
 +                      shrink_ret = do_shrinker_shrink(shrinker, &shrink, num_objs);
 +                      if (shrink_ret == -1)
 +                              break;
 +                      /* Check empty */       
 +                      num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
 +                      if (num_objs <= 0)
 +                              break;
 +              } while (--retry);
 +      }
 +      
 +      up_read(&shrinker_rwsem);
 +}
 +
 +extern void free_hot_cold_page(struct page *page, int cold);
 +/* Isolate pages for PASR */
 +#ifdef CONFIG_MTKPASR_ALLEXTCOMP
 +int mtkpasr_isolate_page(struct page *page, int check_swap)
 +#else
 +int mtkpasr_isolate_page(struct page *page)
 +#endif
 +{
 +      struct zone *zone = page_zone(page);
 +      struct lruvec *lruvec;
 +      unsigned long flags;
 +      isolate_mode_t mode = ISOLATE_ASYNC_MIGRATE;
 +
 +      /* Lock this zone - USE trylock version! */
 +      if (!spin_trylock_irqsave(&zone->lru_lock, flags)) {
 +              printk(KERN_ALERT"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
 +              printk(KERN_ALERT"[%s][%d] Failed to lock this zone!\n",__FUNCTION__,__LINE__);
 +              printk(KERN_ALERT"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
 +              return -EAGAIN;
 +      }
 +
 +#ifdef CONFIG_MTKPASR_ALLEXTCOMP
 +      /* Check whether we should handle SwapBacked, SwapCache pages */
 +      if (check_swap) {
 +              if (PageSwapBacked(page) || PageSwapCache(page)) {
 +                      spin_unlock_irqrestore(&zone->lru_lock, flags);
 +                      return -EACCES;
 +              }
 +      }
 +#endif
 +
 +      /* Try to isolate this page */
 +      if (__isolate_lru_page(page, mode) != 0) {
 +              spin_unlock_irqrestore(&zone->lru_lock, flags);
 +              return -EACCES;
 +      }
 +      
 +      /* Successfully isolated */
 +      lruvec = mem_cgroup_page_lruvec(page, zone);
 +      del_page_from_lru_list(page, lruvec, page_lru(page));
 +      
 +      /* Unlock this zone */
 +      spin_unlock_irqrestore(&zone->lru_lock, flags);
 +
 +      return 0;
 +}
 +
 +/* Drop page (in File/Anon LRUs) (Imitate the behavior of shrink_page_list) */
 +/* If returns error, caller needs to putback page by itself. */
 +int mtkpasr_drop_page(struct page *page)
 +{
 +      int ret;
 +      unsigned long vm_flags = 0x0;
 +      bool active = false;
 +      struct address_space *mapping;
 +      enum ttu_flags unmap_flags = TTU_UNMAP;
 +
 +      /* Suitable scan control */
 +      struct scan_control sc = {
 +              .gfp_mask = GFP_KERNEL,
 +              .order = PAGE_ALLOC_COSTLY_ORDER + 1, 
 +              //.reclaim_mode = RECLAIM_MODE_SINGLE|RECLAIM_MODE_SYNC,        // We only handle "SwapBacked" pages in this reclaim_mode!
 +      };
 +
 +      /* Try to isolate this page */
 +#ifdef CONFIG_MTKPASR_ALLEXTCOMP
 +      ret = mtkpasr_isolate_page(page, 0x1);
 +#else
 +      ret = mtkpasr_isolate_page(page);
 +#endif
 +      if (ret) {
 +              return ret;
 +      }
 +      
 +      /* Check whether it is evictable! */
 +      if (unlikely(!page_evictable(page))) {
 +              putback_lru_page(page);
 +              return -EACCES;
 +      }
 +
 +      /* If it is Active, reference and deactivate it */
 +      if (PageActive(page)) {
 +              active = TestClearPageActive(page);
 +      }
 +
 +      /* If we fail to lock this page, ignore it */   
 +      if (!trylock_page(page)) {
 +              goto putback;
 +      }
 +      
 +      /* If page is in writeback, we don't handle it here! */
 +      if (PageWriteback(page)) {
 +              goto unlock;
 +      }
 +      
 +      /*
 +       * Anonymous process memory has backing store?
 +       * Try to allocate it some swap space here.
 +       */
 +      if (PageAnon(page) && !PageSwapCache(page)) {
 +              /* Check whether we have enough free memory */
 +              if (vm_swap_full()) {
 +                      goto unlock;
 +              }
 +
 +              /* Ok! It is safe to add this page to swap. */
 +              if (!add_to_swap(page, NULL)){
 +                      goto unlock;
 +              }
 +      }
 +      
 +      /* We don't handle dirty file cache here (Related devices may be suspended) */
 +      if (page_is_file_cache(page)) {
 +              /* How do we handle pages in VM_EXEC vmas? */
 +              if ((vm_flags & VM_EXEC)) {
 +                      goto unlock;
 +              }
 +              /* We don't handle dirty file pages! */
 +              if (PageDirty(page)) {
 +#ifdef CONFIG_MTKPASR_DEBUG 
 +                      printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d]\n\n\n\n\n\n",__FUNCTION__,__LINE__);
 +#endif
 +                      goto unlock;
 +              }
 +      }
 +              
 +      /*
 +       * The page is mapped into the page tables of one or more
 +       * processes. Try to unmap it here.
 +       */
 +      mapping = page_mapping(page);
 +      if (page_mapped(page) && mapping) {
 +#if 0
 +              /* Indicate unmap action for SwapBacked pages */
 +              if (PageSwapBacked(page)) {
 +                      unmap_flags |= TTU_IGNORE_ACCESS; 
 +              }
 +#endif
 +              /* To unmap */
 +              switch (try_to_unmap(page, unmap_flags)) {
 +              case SWAP_SUCCESS:
 +                      /* try to free the page below */
 +                      break;
 +              case SWAP_FAIL:
 +                      goto restore_swap;
 +              case SWAP_AGAIN:
 +                      goto restore_swap;
 +              case SWAP_MLOCK:
 +                      goto restore_swap;
 +
 +              }
 +      }
 +      
 +      /* Check whether it is dirtied. 
 +       * We have filtered out dirty file pages above. (IMPORTANT!)
 +       * "VM_BUG_ON(!PageSwapBacked(page))"
 +       * */
 +      if (PageDirty(page)) {
 +              /* Page is dirty, try to write it out here */
 +              /* It's ok for zram swap! */
 +              /* Should we need to apply GFP_IOFS? */
 +              switch (pageout(page, mapping, &sc)) {
 +              case PAGE_SUCCESS:
 +                      if (PageWriteback(page)) {
 +                              goto putback;
 +                      }
 +                      if (PageDirty(page)) {
 +                              goto putback;
 +                      }
 +
 +                      /*
 +                       * A synchronous write - probably a ramdisk.  Go
 +                       * ahead and try to reclaim the page.
 +                       */
 +                      if (!trylock_page(page)) {
 +                              goto putback;
 +                      }
 +                      if (PageDirty(page) || PageWriteback(page)) {
 +                              goto unlock;
 +                      }
 +                      mapping = page_mapping(page);
 +              case PAGE_CLEAN:
 +                      /* try to free the page below */
 +                      break;
 +              default:
 +#ifdef CONFIG_MTKPASR_DEBUG 
 +                      /*printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d]\n\n\n\n\n\n",__FUNCTION__,__LINE__);*/
 +#endif
 +                      goto restore_unmap;
 +              }
 +      }
 +
 +      /* Release buffer */
 +      if (page_has_private(page)) {
 +              if (!try_to_release_page(page, sc.gfp_mask)) {
 +                      goto unlock;
 +              }
 +              if (!mapping && page_count(page) == 1) {
 +                      unlock_page(page);
 +                      if (put_page_testzero(page)) {
 +                              goto freeit;
 +                      } else {
 +                              /* Race! TOCHECK */
 +                              printk(KERN_ALERT "\n\n\n\n\n\n [%s][%d] RACE!!\n\n\n\n\n\n",__FUNCTION__,__LINE__);
 +                              goto notask;
 +                      }
 +              }
 +      }
 +      if (!mapping || !__remove_mapping(mapping, page)) {
 +              goto unlock;
 +      }
 +              
 +      __clear_page_locked(page);
 +
 +freeit:
 +      free_hot_cold_page(page, 0);
 +      return 0;       
 +
 +restore_unmap:
 +      /* Do something */
 +
 +restore_swap:
 +      if (PageSwapCache(page))
 +              try_to_free_swap(page);
 +
 +unlock:
 +      unlock_page(page);
 +
 +putback:      
 +      /* Activate it again if needed! */
 +      if (active)
 +              SetPageActive(page);
 +      
 +      /* We don't putback them to corresponding LRUs, because we want to do more tasks outside this function!
 +      putback_lru_page(page); */
 +
 +      /* Failedly dropped pages. Do migration! */
 +      return -EBUSY;
 +
 +notask:
 +      return 0;
 +}
 +#endif