From fb99b6c3d58f6c2c6c5f53876c04223b1cc7a45e Mon Sep 17 00:00:00 2001
From: ivanmeler
Date: Tue, 12 Apr 2022 17:19:01 +0000
Subject: [PATCH] Import N950FXXSGDUG7 kernel source + dreamlte dt

---
 .../exynos/battery_data_dreamlte_common.dtsi |    12 +
 .../exynos/battery_data_greatlte_common.dtsi |     8 +
 .../exynos/exynos8895-dreamlte_common.dtsi   |     2 +-
 .../exynos/exynos8895-greatlte_common.dtsi   |     2 +-
 drivers/gpu/arm/b_r16p0/mali_kbase_mem.c     |    18 +-
 .../gpu/arm/b_r16p0/mali_kbase_mem_linux.c   |    51 +-
 drivers/gpu/arm/tMIx/r9p0/mali_base_kernel.h |    11 +-
 drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.c   |    40 +-
 drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.h   |     4 +-
 .../gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c |    53 +-
 drivers/hid/hid-core.c                       |     6 +-
 drivers/input/touchscreen/sec_ts/sec_ts.c    |    37 +-
 drivers/input/touchscreen/sec_ts/sec_ts.h    |     1 -
 drivers/input/touchscreen/sec_ts/sec_ts_fn.c |    34 -
 drivers/input/touchscreen/stm/fts_sec.c      |    33 -
 drivers/input/touchscreen/stm/fts_ts.c       |    15 +-
 drivers/input/touchscreen/stm/fts_ts.h       |     2 -
 .../brcm/bbdpl/bbd_patch_file_great_p.h      | 31907 ++++++++--------
 drivers/sensorhub/brcm/ssp_firmware.c        |     4 +-
 drivers/staging/android/ion/Kconfig          |     9 -
 drivers/staging/android/ion/Makefile         |     1 -
 .../staging/android/ion/exynos/exynos_ion.c  |    28 +-
 drivers/staging/android/ion/ion.c            |    12 -
 drivers/staging/android/ion/ion_heap.c       |    10 -
 drivers/staging/android/ion/ion_page_pool.c  |    22 +-
 drivers/staging/android/ion/ion_priv.h       |    11 -
 drivers/staging/android/ion/ion_rbin_heap.c  |   590 -
 drivers/staging/android/lowmemorykiller.c    |    74 -
 drivers/staging/android/uapi/ion.h           |     4 -
 drivers/usb/gadget/function/f_acm.c          |    25 +-
 drivers/usb/gadget/function/f_conn_gadget.c  |    17 +-
 fs/exec.c                                    |    18 +-
 fs/inode.c                                   |     8 -
 fs/proc/meminfo.c                            |    12 -
 include/linux/cma.h                          |     8 -
 include/linux/gfp.h                          |     7 +-
 include/linux/mfd/max77865-private.h         |     2 -
 include/linux/mm.h                           |     7 -
 include/linux/mmzone.h                       |    43 +-
 include/linux/vmstat.h                       |     2 -
 include/trace/events/gfpflags.h              |     1 -
 include/trace/events/ion.h                   |   102 -
 include/uapi/linux/input-event-codes.h       |     2 -
 kernel/ptrace.c                              |    10 +-
 kernel/sys.c                                 |   165 +-
 mm/Kconfig                                   |    11 -
 mm/cma.c                                     |    39 +-
 mm/cma.h                                     |     3 -
 mm/compaction.c                              |     5 -
 mm/filemap.c                                 |     2 -
 mm/internal.h                                |     3 -
 mm/page_alloc.c                              |   108 +-
 mm/usercopy.c                                |     8 +-
 mm/vmscan.c                                  |   231 +-
 mm/vmstat.c                                  |     4 -
 net/netfilter/nf_conntrack_h323_main.c       |     1 +
 56 files changed, 16330 insertions(+), 17515 deletions(-)
 delete mode 100644 drivers/staging/android/ion/ion_rbin_heap.c

diff --git a/arch/arm64/boot/dts/exynos/battery_data_dreamlte_common.dtsi b/arch/arm64/boot/dts/exynos/battery_data_dreamlte_common.dtsi
index 3e0e76168611..684ed9b85062 100644
--- a/arch/arm64/boot/dts/exynos/battery_data_dreamlte_common.dtsi
+++ b/arch/arm64/boot/dts/exynos/battery_data_dreamlte_common.dtsi
@@ -283,6 +283,13 @@
 		700 42875 4217 4187 90
 		1000 42375 4167 4137 89>;
 
+	battery,health_condition = <
+		/* CYCLE ASOC */
+		1200 75 /* GOOD */
+		1500 65 /* NORMAL */
+		1700 55 /* AGED */
+		>;
+
 	battery,max_input_voltage = <12000>; /* mV */
 	battery,max_input_current = <3000>; /* mA */
 
@@ -322,6 +329,11 @@
 	fuelgauge,qrtable30 = <0x0D00>;
 	fuelgauge,fg_resistor = <2>;
 	fuelgauge,capacity = <0x0C45>;
+	fuelgauge,rcomp0 = <0x001A>;
+	fuelgauge,tempco = <0x0B10>;
+	fuelgauge,dPacc = <0x3200>;
+	fuelgauge,dQacc = <0x05CB>;
+	fuelgauge,fullcapnom = <0x172E>;
 	/*fuelgauge,auto_discharge_en;*/
 	fuelgauge,discharge_temp_threshold = <600>;
 	fuelgauge,discharge_volt_threshold = <4200>;
diff --git a/arch/arm64/boot/dts/exynos/battery_data_greatlte_common.dtsi b/arch/arm64/boot/dts/exynos/battery_data_greatlte_common.dtsi
index d70601096c28..e0d0f60313cb 100644
--- a/arch/arm64/boot/dts/exynos/battery_data_greatlte_common.dtsi
+++ b/arch/arm64/boot/dts/exynos/battery_data_greatlte_common.dtsi
@@ -206,6 +206,7 @@
 	battery,sleep_mode_limit_current = <500>;
 	battery,wc_full_input_limit_current = <100>;
 	battery,wc_cv_current = <820>;
+	battery,wc_cv_tx_current = <650>;
 	battery,wc_cv_pack_current = <630>;
 
 	battery,mix_high_temp = <420>;
@@ -277,6 +278,13 @@
 		700 42875 4217 4187 90
 		1000 42375 4167 4137 89>;
 
+	battery,health_condition = <
+		/* CYCLE ASOC */
+		1200 75 /* GOOD */
+		1500 65 /* NORMAL */
+		1700 55 /* AGED */
+		>;
+
 	battery,max_input_voltage = <12000>; /* mV */
 	battery,max_input_current = <3000>; /* mA */
diff --git a/arch/arm64/boot/dts/exynos/exynos8895-dreamlte_common.dtsi b/arch/arm64/boot/dts/exynos/exynos8895-dreamlte_common.dtsi
index aff93910f438..78e8397528d5 100644
--- a/arch/arm64/boot/dts/exynos/exynos8895-dreamlte_common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos8895-dreamlte_common.dtsi
@@ -1293,7 +1293,7 @@
 			0 2 2 0 /* IN1 */
 			2 2 0 0 /* IN2 */
 		>;
-		cirrus,out-mono = <0 0 0 1 0 0>;
+		cirrus,out-mono = <0 0 1 1 0 0>;
 		cirrus,auxpdm-falling-edge;
 
 		cirrus,gpio-defaults = <
diff --git a/arch/arm64/boot/dts/exynos/exynos8895-greatlte_common.dtsi b/arch/arm64/boot/dts/exynos/exynos8895-greatlte_common.dtsi
index 410382840553..cdd6967b682b 100644
--- a/arch/arm64/boot/dts/exynos/exynos8895-greatlte_common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos8895-greatlte_common.dtsi
@@ -1213,7 +1213,7 @@
 			0 2 2 0 /* IN1 */
 			2 2 0 0 /* IN2 */
 		>;
-		cirrus,out-mono = <0 0 0 1 0 0>;
+		cirrus,out-mono = <0 0 1 1 0 0>;
 		cirrus,auxpdm-falling-edge;
 
 		cirrus,gpio-defaults = <
diff --git a/drivers/gpu/arm/b_r16p0/mali_kbase_mem.c b/drivers/gpu/arm/b_r16p0/mali_kbase_mem.c
index ee6c2977c079..4a0069f960b9 100644
--- a/drivers/gpu/arm/b_r16p0/mali_kbase_mem.c
+++ b/drivers/gpu/arm/b_r16p0/mali_kbase_mem.c
@@ -1164,7 +1164,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
 			if (err)
 				goto bad_insert;
 
-			kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+			/* Note: mapping count is tracked at alias
+			 * creation time
+			 */
 		} else {
 			err = kbase_mmu_insert_single_page(kctx,
 				reg->start_pfn + i * stride,
@@ -1204,7 +1206,6 @@ bad_insert:
 					reg->start_pfn + (i * stride),
 					reg->gpu_alloc->imported.alias.aliased[i].length,
 					kctx->as_nr);
-				kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
 			}
 	}
 
@@ -1226,14 +1227,11 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
 		return 0;
 
 	if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
-		size_t i;
-
 		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
 			reg->start_pfn, reg->nr_pages, kctx->as_nr);
-		KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
-		for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
-			if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
-				kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+		/* We mark the source allocs as unmapped from the GPU when
+		 * putting reg's allocs
+		 */
 	} else if (reg->gpu_alloc) {
 		err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
 			reg->start_pfn, kbase_reg_current_backed_size(reg),
@@ -2368,8 +2366,10 @@ void kbase_mem_kref_free(struct kref *kref)
 		aliased = alloc->imported.alias.aliased;
 		if (aliased) {
 			for (i = 0; i < alloc->imported.alias.nents; i++)
-				if (aliased[i].alloc)
+				if (aliased[i].alloc) {
+
kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc); kbase_mem_phy_alloc_put(aliased[i].alloc); + } vfree(aliased); } break; diff --git a/drivers/gpu/arm/b_r16p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/b_r16p0/mali_kbase_mem_linux.c index 88fb7ebd37c7..f053b7d95074 100644 --- a/drivers/gpu/arm/b_r16p0/mali_kbase_mem_linux.c +++ b/drivers/gpu/arm/b_r16p0/mali_kbase_mem_linux.c @@ -811,7 +811,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED; new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED; if (prev_needed != new_needed) { - /* Aliased allocations can't be made ephemeral */ + /* Aliased allocations can't be shrunk as the code doesn't + * support looking up: + * - all physical pages assigned to different GPU VAs + * - CPU mappings for the physical pages at different vm_pgoff + * (==GPU VA) locations. + */ if (atomic_read(®->cpu_alloc->gpu_mappings) > 1) goto out_unlock; @@ -1006,6 +1011,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer( u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev); struct kbase_alloc_import_user_buf *user_buf; struct page **pages = NULL; + int write; if ((address & (cache_line_alignment - 1)) != 0 || (size & (cache_line_alignment - 1)) != 0) { @@ -1114,15 +1120,17 @@ static struct kbase_va_region *kbase_mem_from_user_buffer( down_read(¤t->mm->mmap_sem); + write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR); + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) faulted_pages = get_user_pages(current, current->mm, address, *va_pages, - reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); + write, 0, pages, NULL); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) faulted_pages = get_user_pages(address, *va_pages, - reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); + write, 0, pages, NULL); #else faulted_pages = get_user_pages(address, *va_pages, - reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0, + write ? FOLL_WRITE : 0, pages, NULL); #endif @@ -1294,6 +1302,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, goto bad_handle; /* Not found/already free */ if (aliasing_reg->flags & KBASE_REG_DONT_NEED) goto bad_handle; /* Ephemeral region */ + if (aliasing_reg->flags & KBASE_REG_JIT) + goto bad_handle; /* JIT regions can't be + * aliased. NO_USER_FREE flag + * covers the entire lifetime + * of JIT regions. The other + * types of regions covered + * by this flag also shall + * not be aliased. + */ if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED)) goto bad_handle; /* GPU uncached memory */ if (!aliasing_reg->gpu_alloc) @@ -1323,6 +1340,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc); reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset; + + /* Ensure the underlying alloc is marked as being + * mapped at >1 different GPU VA immediately, even + * though mapping might not happen until later. + * + * Otherwise, we would (incorrectly) allow shrinking of + * the source region (aliasing_reg) and so freeing the + * physical pages (without freeing the entire alloc) + * whilst we still hold an implicit reference on those + * physical pages. 
+ */ + kbase_mem_phy_alloc_gpu_mapped(alloc); } } @@ -1366,6 +1395,10 @@ no_cookie: #endif no_mmap: bad_handle: + /* Marking the source allocs as not being mapped on the GPU and putting + * them is handled by putting reg's allocs, so no rollback of those + * actions is done here. + */ kbase_gpu_vm_unlock(kctx); no_aliased_array: invalid_flags: @@ -1604,7 +1637,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages) if (new_pages > reg->nr_pages) goto out_unlock; - /* can't be mapped more than once on the GPU */ + /* Can't shrink when physical pages are mapped to different GPU + * VAs. The code doesn't support looking up: + * - all physical pages assigned to different GPU VAs + * - CPU mappings for the physical pages at different vm_pgoff + * (==GPU VA) locations. + * + * Note that for Native allocs mapped at multiple GPU VAs, growth of + * such allocs is not a supported use-case. + */ if (atomic_read(®->gpu_alloc->gpu_mappings) > 1) goto out_unlock; /* can't grow regions which are ephemeral */ diff --git a/drivers/gpu/arm/tMIx/r9p0/mali_base_kernel.h b/drivers/gpu/arm/tMIx/r9p0/mali_base_kernel.h index 7835f3826454..259a59a7440f 100644 --- a/drivers/gpu/arm/tMIx/r9p0/mali_base_kernel.h +++ b/drivers/gpu/arm/tMIx/r9p0/mali_base_kernel.h @@ -1,6 +1,6 @@ /* * - * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the * GNU General Public License version 2 as published by the Free Software @@ -147,9 +147,14 @@ typedef u32 base_mem_alloc_flags; */ #define BASE_MEM_RESERVED_BIT_5 ((base_mem_alloc_flags)1 << 5) #define BASE_MEM_RESERVED_BIT_6 ((base_mem_alloc_flags)1 << 6) -#define BASE_MEM_RESERVED_BIT_7 ((base_mem_alloc_flags)1 << 7) #define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8) +/* + * Userspace is not allowed to free this memory. + * Flag is only allowed on allocations originating from kbase. + */ +#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7) + /* Grow backing store on GPU Page Fault */ #define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9) @@ -235,7 +240,7 @@ typedef u32 base_mem_alloc_flags; */ #define BASE_MEM_FLAGS_RESERVED \ (BASE_MEM_RESERVED_BIT_5 | BASE_MEM_RESERVED_BIT_6 | \ - BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \ + BASE_MEM_RESERVED_BIT_8 | \ BASE_MEM_RESERVED_BIT_19) /* A mask of all the flags that can be returned via the base_mem_get_flags() diff --git a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.c b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.c index 71fa6e92919c..edabb6c1922b 100644 --- a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.c +++ b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.c @@ -1,6 +1,6 @@ /* * - * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. 
* * This program is free software and is provided to you under the terms of the * GNU General Public License version 2 as published by the Free Software @@ -975,7 +975,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 if (err) goto bad_insert; - kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc); + /* Note: mapping count is tracked at alias + * creation time + */ } else { err = kbase_mmu_insert_single_page(kctx, reg->start_pfn + i * stride, @@ -1008,7 +1010,6 @@ bad_insert: while (i--) if (reg->gpu_alloc->imported.alias.aliased[i].alloc) { kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length); - kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); } } @@ -1030,13 +1031,10 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg) return 0; if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) { - size_t i; - err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages); - KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased); - for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++) - if (reg->gpu_alloc->imported.alias.aliased[i].alloc) - kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc); + /* We mark the source allocs as unmapped from the GPU when + * putting reg's allocs + */ } else { err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg)); /* MALI_SEC_INTEGRATION */ @@ -1348,8 +1346,8 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re KBASE_DEBUG_ASSERT(NULL != reg); lockdep_assert_held(&kctx->reg_lock); - if (reg->flags & KBASE_REG_JIT) { - dev_warn(reg->kctx->kbdev->dev, "Attempt to free JIT memory!\n"); + if (reg->flags & KBASE_REG_NO_USER_FREE) { + dev_warn(reg->kctx->kbdev->dev, "Attempt to free GPU memory whose freeing by user space is forbidden!\n"); return -EINVAL; } @@ -1511,6 +1509,9 @@ int kbase_update_region_flags(struct kbase_context *kctx, KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT); } + if (flags & BASEP_MEM_NO_USER_FREE) + reg->flags |= KBASE_REG_NO_USER_FREE; + return 0; } @@ -1818,8 +1819,10 @@ void kbase_mem_kref_free(struct kref *kref) aliased = alloc->imported.alias.aliased; if (aliased) { for (i = 0; i < alloc->imported.alias.nents; i++) - if (aliased[i].alloc) + if (aliased[i].alloc) { + kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc); kbase_mem_phy_alloc_put(aliased[i].alloc); + } vfree(aliased); } break; @@ -2291,7 +2294,8 @@ static void kbase_jit_destroy_worker(struct work_struct *work) list_del(®->jit_node); mutex_unlock(&kctx->jit_evict_lock); - reg->flags &= ~KBASE_REG_JIT; + + reg->flags &= ~KBASE_REG_NO_USER_FREE; kbase_gpu_vm_lock(kctx); kbase_mem_free_region(kctx, reg); kbase_gpu_vm_unlock(kctx); @@ -2475,7 +2479,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx, /* No suitable JIT allocation was found so create a new one */ u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF | - BASE_MEM_COHERENT_LOCAL; + BASE_MEM_COHERENT_LOCAL | BASEP_MEM_NO_USER_FREE; u64 gpu_addr; mutex_unlock(&kctx->jit_evict_lock); @@ -2485,8 +2489,6 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx, if (!reg) goto out_unlocked; - reg->flags |= KBASE_REG_JIT; - mutex_lock(&kctx->jit_evict_lock); list_add(®->jit_node, &kctx->jit_active_head); mutex_unlock(&kctx->jit_evict_lock); @@ -2608,7 
+2610,7 @@ bool kbase_jit_evict(struct kbase_context *kctx) mutex_unlock(&kctx->jit_evict_lock); if (reg) { - reg->flags &= ~KBASE_REG_JIT; + reg->flags &= ~KBASE_REG_NO_USER_FREE; kbase_mem_free_region(kctx, reg); } @@ -2635,7 +2637,7 @@ void kbase_jit_term(struct kbase_context *kctx) struct kbase_va_region, jit_node); list_del(&walker->jit_node); mutex_unlock(&kctx->jit_evict_lock); - walker->flags &= ~KBASE_REG_JIT; + walker->flags &= ~KBASE_REG_NO_USER_FREE; kbase_mem_free_region(kctx, walker); mutex_lock(&kctx->jit_evict_lock); } @@ -2646,7 +2648,7 @@ void kbase_jit_term(struct kbase_context *kctx) struct kbase_va_region, jit_node); list_del(&walker->jit_node); mutex_unlock(&kctx->jit_evict_lock); - walker->flags &= ~KBASE_REG_JIT; + walker->flags &= ~KBASE_REG_NO_USER_FREE; kbase_mem_free_region(kctx, walker); mutex_lock(&kctx->jit_evict_lock); } diff --git a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.h b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.h index 53c31418819b..031a1c120db3 100644 --- a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.h +++ b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem.h @@ -1,6 +1,6 @@ /* * - * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the * GNU General Public License version 2 as published by the Free Software @@ -302,7 +302,7 @@ struct kbase_va_region { #define KBASE_REG_TILER_ALIGN_TOP (1ul << 23) /* Memory is handled by JIT - user space should not be able to free it */ -#define KBASE_REG_JIT (1ul << 24) +#define KBASE_REG_NO_USER_FREE (1ul << 24) #define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0) diff --git a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c index 78c2e76f2cf8..0815ebbeb63e 100644 --- a/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c +++ b/drivers/gpu/arm/tMIx/r9p0/mali_kbase_mem_linux.c @@ -1,6 +1,6 @@ /* * - * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved. + * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the * GNU General Public License version 2 as published by the Free Software @@ -603,7 +603,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED; new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED; if (prev_needed != new_needed) { - /* Aliased allocations can't be made ephemeral */ + /* Aliased allocations can't be shrunk as the code doesn't + * support looking up: + * - all physical pages assigned to different GPU VAs + * - CPU mappings for the physical pages at different vm_pgoff + * (==GPU VA) locations. 
+ */ if (atomic_read(®->cpu_alloc->gpu_mappings) > 1) goto out_unlock; @@ -924,6 +929,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer( u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx); struct kbase_alloc_import_user_buf *user_buf; struct page **pages = NULL; + int write; if ((address & (cache_line_alignment - 1)) != 0 || (size & (cache_line_alignment - 1)) != 0) { @@ -1017,15 +1023,17 @@ static struct kbase_va_region *kbase_mem_from_user_buffer( down_read(¤t->mm->mmap_sem); + write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR); + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) faulted_pages = get_user_pages(current, current->mm, address, *va_pages, - reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); + write, 0, pages, NULL); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) faulted_pages = get_user_pages(address, *va_pages, - reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL); + write, 0, pages, NULL); #else faulted_pages = get_user_pages(address, *va_pages, - reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0, + write ? FOLL_WRITE : 0, pages, NULL); #endif @@ -1197,6 +1205,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, goto bad_handle; /* Free region */ if (aliasing_reg->flags & KBASE_REG_DONT_NEED) goto bad_handle; /* Ephemeral region */ + if (aliasing_reg->flags & KBASE_REG_NO_USER_FREE) + goto bad_handle; /* JIT regions can't be + * aliased. NO_USER_FREE flag + * covers the entire lifetime + * of JIT regions. The other + * types of regions covered + * by this flag also shall + * not be aliased. + */ if (!aliasing_reg->gpu_alloc) goto bad_handle; /* No alloc */ if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) @@ -1224,6 +1241,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc); reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length; reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset; + + /* Ensure the underlying alloc is marked as being + * mapped at >1 different GPU VA immediately, even + * though mapping might not happen until later. + * + * Otherwise, we would (incorrectly) allow shrinking of + * the source region (aliasing_reg) and so freeing the + * physical pages (without freeing the entire alloc) + * whilst we still hold an implicit reference on those + * physical pages. + */ + kbase_mem_phy_alloc_gpu_mapped(alloc); } } @@ -1267,6 +1296,10 @@ no_cookie: #endif no_mmap: bad_handle: + /* Marking the source allocs as not being mapped on the GPU and putting + * them is handled by putting reg's allocs, so no rollback of those + * actions is done here. + */ kbase_gpu_vm_unlock(kctx); no_aliased_array: invalid_flags: @@ -1512,7 +1545,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages) if (new_pages > reg->nr_pages) goto out_unlock; - /* can't be mapped more than once on the GPU */ + /* Can't shrink when physical pages are mapped to different GPU + * VAs. The code doesn't support looking up: + * - all physical pages assigned to different GPU VAs + * - CPU mappings for the physical pages at different vm_pgoff + * (==GPU VA) locations. + * + * Note that for Native allocs mapped at multiple GPU VAs, growth of + * such allocs is not a supported use-case. 
+ */ if (atomic_read(®->gpu_alloc->gpu_mappings) > 1) goto out_unlock; /* can't grow regions which are ephemeral */ diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index a3868e3622a8..908404de5cda 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. */ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) { struct hid_field *field; @@ -102,7 +102,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - values * sizeof(unsigned)), GFP_KERNEL); + usages * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -254,7 +254,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages, parser->global.report_count); + field = hid_register_field(report, usages); if (!field) return 0; diff --git a/drivers/input/touchscreen/sec_ts/sec_ts.c b/drivers/input/touchscreen/sec_ts/sec_ts.c index d02aa0fdfdb4..f7f2c8f58883 100644 --- a/drivers/input/touchscreen/sec_ts/sec_ts.c +++ b/drivers/input/touchscreen/sec_ts/sec_ts.c @@ -2038,7 +2038,6 @@ static void sec_ts_set_input_prop(struct sec_ts_data *ts, struct input_dev *dev, set_bit(BTN_TOUCH, dev->keybit); set_bit(BTN_TOOL_FINGER, dev->keybit); set_bit(KEY_BLACK_UI_GESTURE, dev->keybit); - set_bit(KEY_INT_CANCEL, dev->keybit); #ifdef SEC_TS_SUPPORT_TOUCH_KEY if (ts->plat_data->support_mskey) { int i; @@ -2804,24 +2803,18 @@ static void sec_ts_input_close(struct input_dev *dev) ts->pressure_setting_mode = 0; - if (ts->prox_power_off) { - sec_ts_stop_device(ts); - } else { - if (ts->lowpower_mode) { - int ret; - - ret = sec_ts_set_lowpowermode(ts, TO_LOWPOWER_MODE); - if (ts->reset_is_on_going && (ret < 0)) { - input_err(true, &ts->client->dev, "%s: failed to reset, ret:%d\n", __func__, ret); - ts->reset_is_on_going = false; - schedule_delayed_work(&ts->reset_work, msecs_to_jiffies(TOUCH_RESET_DWORK_TIME)); - } - } else { - sec_ts_stop_device(ts); + if (ts->lowpower_mode) { + int ret; + + ret = sec_ts_set_lowpowermode(ts, TO_LOWPOWER_MODE); + if (ts->reset_is_on_going && (ret < 0)) { + input_err(true, &ts->client->dev, "%s: failed to reset, ret:%d\n", __func__, ret); + ts->reset_is_on_going = false; + schedule_delayed_work(&ts->reset_work, msecs_to_jiffies(TOUCH_RESET_DWORK_TIME)); } + } else { + sec_ts_stop_device(ts); } - - ts->prox_power_off = 0; } #endif @@ -2903,17 +2896,9 @@ int sec_ts_stop_device(struct sec_ts_data *ts) goto out; } - disable_irq(ts->client->irq); - ts->power_status = SEC_TS_STATE_POWER_OFF; - if (ts->prox_power_off) { - input_report_key(ts->input_dev, KEY_INT_CANCEL, 1); - input_sync(ts->input_dev); - input_report_key(ts->input_dev, KEY_INT_CANCEL, 0); - input_sync(ts->input_dev); - } - + disable_irq(ts->client->irq); sec_ts_locked_release_all_finger(ts); ts->plat_data->power(ts, false); diff --git a/drivers/input/touchscreen/sec_ts/sec_ts.h b/drivers/input/touchscreen/sec_ts/sec_ts.h index 617d088fba93..b9219343a7ac 100644 --- a/drivers/input/touchscreen/sec_ts/sec_ts.h +++ b/drivers/input/touchscreen/sec_ts/sec_ts.h @@ -675,7 +675,6 @@ struct sec_ts_data { u8 pressure_setting_mode; volatile u8 touch_noise_status; 
volatile bool input_closed; - long prox_power_off; int touch_count; int tx_count; diff --git a/drivers/input/touchscreen/sec_ts/sec_ts_fn.c b/drivers/input/touchscreen/sec_ts/sec_ts_fn.c index ead3bd42454e..705e55f4d027 100644 --- a/drivers/input/touchscreen/sec_ts/sec_ts_fn.c +++ b/drivers/input/touchscreen/sec_ts/sec_ts_fn.c @@ -934,38 +934,6 @@ static ssize_t ic_status_show(struct device *dev, return snprintf(buf, SEC_CMD_BUF_SIZE, "%s\n", buff); } -static ssize_t prox_power_off_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct sec_cmd_data *sec = dev_get_drvdata(dev); - struct sec_ts_data *ts = container_of(sec, struct sec_ts_data, sec); - - input_info(true, &ts->client->dev, "%s: %d\n", __func__, - ts->prox_power_off); - - return snprintf(buf, SEC_CMD_BUF_SIZE, "%ld", ts->prox_power_off); -} - -static ssize_t prox_power_off_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct sec_cmd_data *sec = dev_get_drvdata(dev); - struct sec_ts_data *ts = container_of(sec, struct sec_ts_data, sec); - long data; - int ret; - - ret = kstrtol(buf, 10, &data); - if (ret < 0) - return ret; - - input_info(true, &ts->client->dev, "%s: %ld\n", __func__, data); - - ts->prox_power_off = data; - - return count; -} - static ssize_t read_support_feature(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1005,7 +973,6 @@ static DEVICE_ATTR(read_ambient_channel_delta, 0444, read_ambient_channel_delta_ static DEVICE_ATTR(get_lp_dump, 0444, get_lp_dump, NULL); static DEVICE_ATTR(force_recal_count, 0444, get_force_recal_count, NULL); static DEVICE_ATTR(status, 0444, ic_status_show, NULL); -static DEVICE_ATTR(prox_power_off, 0664, prox_power_off_show, prox_power_off_store); static DEVICE_ATTR(support_feature, 0444, read_support_feature, NULL); static struct attribute *cmd_attributes[] = { @@ -1030,7 +997,6 @@ static struct attribute *cmd_attributes[] = { &dev_attr_get_lp_dump.attr, &dev_attr_force_recal_count.attr, &dev_attr_status.attr, - &dev_attr_prox_power_off.attr, &dev_attr_support_feature.attr, NULL, }; diff --git a/drivers/input/touchscreen/stm/fts_sec.c b/drivers/input/touchscreen/stm/fts_sec.c index 785a20b95939..b135bf0f6a00 100644 --- a/drivers/input/touchscreen/stm/fts_sec.c +++ b/drivers/input/touchscreen/stm/fts_sec.c @@ -784,37 +784,6 @@ out: return strlen(buf); } -static ssize_t prox_power_off_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct sec_cmd_data *sec = dev_get_drvdata(dev); - struct fts_ts_info *info = container_of(sec, struct fts_ts_info, sec); - - input_info(true, &info->client->dev, "%s: %d\n", __func__, - info->prox_power_off); - - return snprintf(buf, SEC_CMD_BUF_SIZE, "%d", info->prox_power_off); -} - -static ssize_t prox_power_off_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct sec_cmd_data *sec = dev_get_drvdata(dev); - struct fts_ts_info *info = container_of(sec, struct fts_ts_info, sec); - int ret, data; - - ret = kstrtoint(buf, 10, &data); - if (ret < 0) - return ret; - - input_info(true, &info->client->dev, "%s: %d\n", __func__, data); - - info->prox_power_off = data; - - return count; -} - static ssize_t get_force_recal_count(struct device *dev, struct device_attribute *attr, char *buf) { @@ -858,7 +827,6 @@ static DEVICE_ATTR(read_ambient_info, 0444, read_ambient_info_show, NULL); static DEVICE_ATTR(read_ambient_channel_info, 0444, read_ambient_channel_info_show, NULL); static 
DEVICE_ATTR(read_ambient_channel_delta, 0444, read_ambient_channel_delta_show, NULL); static DEVICE_ATTR(get_lp_dump, 0444, get_lp_dump, NULL); -static DEVICE_ATTR(prox_power_off, 0664, prox_power_off_show, prox_power_off_store); static DEVICE_ATTR(force_recal_count, 0444, get_force_recal_count, NULL); static DEVICE_ATTR(support_feature, 0444, read_support_feature, NULL); @@ -880,7 +848,6 @@ static struct attribute *sec_touch_facotry_attributes[] = { &dev_attr_read_ambient_channel_info.attr, &dev_attr_read_ambient_channel_delta.attr, &dev_attr_get_lp_dump.attr, - &dev_attr_prox_power_off.attr, &dev_attr_force_recal_count.attr, &dev_attr_support_feature.attr, NULL, diff --git a/drivers/input/touchscreen/stm/fts_ts.c b/drivers/input/touchscreen/stm/fts_ts.c index 83a0e8635e4f..39bd4f8f5fb2 100644 --- a/drivers/input/touchscreen/stm/fts_ts.c +++ b/drivers/input/touchscreen/stm/fts_ts.c @@ -2763,7 +2763,6 @@ static void fts_set_input_prop(struct fts_ts_info *info, struct input_dev *dev, set_bit(BTN_TOOL_FINGER, dev->keybit); set_bit(KEY_BLACK_UI_GESTURE, dev->keybit); set_bit(KEY_HOMEPAGE, dev->keybit); - set_bit(KEY_INT_CANCEL, dev->keybit); #ifdef FTS_SUPPORT_TOUCH_KEY if (info->board->support_mskey) { @@ -3253,13 +3252,6 @@ static void fts_input_close(struct input_dev *dev) #endif cancel_delayed_work(&info->reset_work); - if (info->prox_power_off) { - input_report_key(info->input_dev, KEY_INT_CANCEL, 1); - input_sync(info->input_dev); - input_report_key(info->input_dev, KEY_INT_CANCEL, 0); - input_sync(info->input_dev); - } - #ifndef CONFIG_SEC_FACTORY if (info->board->always_lpmode && info->board->support_pressure) info->lowpower_flag |= FTS_MODE_PRESSURE; @@ -3267,12 +3259,7 @@ static void fts_input_close(struct input_dev *dev) info->pressure_setting_mode = 0; - if (info->prox_power_off) - fts_stop_device(info, false); - else - fts_stop_device(info, info->lowpower_flag); - - info->prox_power_off = 0; + fts_stop_device(info, info->lowpower_flag); #ifdef FTS_SUPPORT_HOVER info->retry_hover_enable_after_wakeup = 0; diff --git a/drivers/input/touchscreen/stm/fts_ts.h b/drivers/input/touchscreen/stm/fts_ts.h index 039631c363c3..306c4e5e512f 100644 --- a/drivers/input/touchscreen/stm/fts_ts.h +++ b/drivers/input/touchscreen/stm/fts_ts.h @@ -675,8 +675,6 @@ struct fts_ts_info { unsigned char pressure_cal_base; unsigned char pressure_cal_delta; - int prox_power_off; - int max_baseline; int max_baseline_tx; int max_baseline_rx; diff --git a/drivers/sensorhub/brcm/bbdpl/bbd_patch_file_great_p.h b/drivers/sensorhub/brcm/bbdpl/bbd_patch_file_great_p.h index cfab54573d5e..add2b129e6af 100644 --- a/drivers/sensorhub/brcm/bbdpl/bbd_patch_file_great_p.h +++ b/drivers/sensorhub/brcm/bbdpl/bbd_patch_file_great_p.h @@ -1,6 +1,6 @@ "\n" -"\n" -"\n" +"\n" +"\n" "\n" "\n" "\n" @@ -12,7 +12,7 @@ "\n" +"000000000000FFFFFFFF00000000000000000000000039\n" +"908700B9908700010000000200000060AA830000000000\n" +"0000000080B5002904D04A68C0F8B02009680163A2F799\n" +"F8002002BDD0F8B0204A60006B08600020704700F19402\n" +"53688B629368CB62D3680B6312694A6300F17002D368CB\n" +"60036F0B6093688B6053684B6053694B61D369DB08CB61\n" +"13690B61136A0B6290F8943081F8243090F8BC30002B02\n" +"D0002380F8BC3090698861002070470000DFF800F07F74\n" +"0D0010B5044604F574509C30C046C04604F52C505030FF\n" +"F7F0FF204610BD80B5DDF762F801680968BDE804400847\n" +"80B5DDF75AF801688968BDE80440084710B5DDF752F801\n" +"68C968884742F64C3488F752FC0021215010BD00F59730\n" +"A830704700F597309C30704710B50C46DDF73CF8026852\n" 
+"6C2146BDE8104010477CB50C4615461E46DCF788F80799\n" +"01913346069900912A46214604686469A04776BD80B5DC\n" +"F77AF801688969BDE80440084780B5DCF772F80168C969\n" +"BDE80440084780B5DCF76AF80168096ABDE80440084780\n" +"B5DCF762F80168496ABDE80440084780B5DCF75AF80168\n" +"896ABDE80440084780B5DCF752F80168C96ABDE8044008\n" +"4780B5DCF74AF80168096BBDE80440084780B5DCF742F8\n" +"0168496BBDE80440084780B5DCF73AF80168896BBDE804\n" +"40084780B5DCF732F80168C96BBDE80440084780B5DCF7\n" +"2AF80168096CBDE8044008470B4600F6A8515D221846A2\n" +"F705B944F64C321150704710B5044604F10800A4F7A3F9\n" +"204610BD017370474CF6B821085CF1BC02D0A387007047\n" +"10B5040006D0B8F7E9FE2046BDE81040B5F731BB10BD08\n" +"30ECF71BBC10B5040006D0B8F774FF2046BDE81040B5F7\n" +"23BB10BD10B5040006D0C046C0462046BDE81040B5F718\n" +"BB10BD10B5040006D0B8F758FF2046BDE81040B5F70DBB\n" +"10BD10B5040006D0B8F71BFE2046BDE81040B5F702BB10\n" +"BD10B5040006D0B8F70AFE2046BDE81040B5F7F7BA10BD\n" +"10B5040006D0F5F7A9FD2046BDE81040B5F7ECBA10BD00\n" +"7F08B93F2070471020704710B5040006D0B8F7E8FD2046\n" +"BDE81040B5F7DBBA10BDB830AAF7DDB9B830704780B502\n" +"68D2F8B02090470168C96CBDE804400847D830AAF7CEB9\n" +"0268D2F89420104780B50268D2F8B0209047B8F787FD00\n" +"B1012002BD80B50268D2F8B0209047B8F77DFD012818BF\n" +"002002BD80B50268D2F8B0209047B8F772FD022801D101\n" +"2002BD002002BD10B5040006D0B8F747FE2046BDE81040\n" +"B5F798BA10BD10B5040006D0F5F73AFD2046BDE81040B5\n" +"F78DBA10BD0420704710B5040006D0EBF79FFB2046BDE8\n" +"1040B5F780BA10BD10B5040006D0B8F7E2FD2046BDE810\n" +"40B5F775BA10BDDFF800F0CD430E0000F5E860FFF7F8BF\n" +"10B5040006D0B8F76DFE2046BDE81040B5F762BA10BD10\n" +"B5040006D0B8F7AEFE2046BDE81040B5F757BA10BD10B5\n" +"040006D0B8F734FE2046BDE81040B5F74CBA10BD10B504\n" +"0006D0ECF7A0FA2046BDE81040B5F741BA10BD10B50446\n" +"B8F740FD204610BD10B5040006D0FFF7F5FF2046BDE810\n" +"40B5F730BA10BD10B5040006D0FCF7D5FB2046BDE81040\n" +"B5F725BA10BD10B5040006D0B8F742FE2046BDE81040B5\n" +"F71ABA10BD38B504460D46284600F005F8A84218BF0024\n" +"204632BD10B5044614F1010F82B00AD08DF80000012200\n" +"A90120FEF7B3FE012801D1204616BD4FF0FF3016BD0022\n" +"00214FF0FF30FEF7A6BEFF83FCFF04F4060048B5870000\n" +"000000ABD1FBFF480D000008EEFFFF00A8870000000000\n" +"09EDFFFFF1100088A68700000000000000000000000000\n" +">\n" diff --git a/drivers/sensorhub/brcm/ssp_firmware.c b/drivers/sensorhub/brcm/ssp_firmware.c index dbc533c96557..ec9f3d505c0d 100644 --- a/drivers/sensorhub/brcm/ssp_firmware.c +++ b/drivers/sensorhub/brcm/ssp_firmware.c @@ -20,7 +20,7 @@ #elif ANDROID_VERSION < 90000 #define SSP_FIRMWARE_REVISION_BCM 18071100 /*Android O*/ #else -#define SSP_FIRMWARE_REVISION_BCM 19101000 /*Android P*/ +#define SSP_FIRMWARE_REVISION_BCM 19032700 /*Android P*/ #endif #elif defined(CONFIG_SENSORS_SSP_GREAT) #if ANDROID_VERSION < 80000 @@ -28,7 +28,7 @@ #elif ANDROID_VERSION < 90000 #define SSP_FIRMWARE_REVISION_BCM 18020700 /*Android O*/ #else -#define SSP_FIRMWARE_REVISION_BCM 19011400 /*Android P*/ +#define SSP_FIRMWARE_REVISION_BCM 19040300 /*Android P*/ #endif #elif defined(CONFIG_SENSORS_SSP_VLTE) #define SSP_FIRMWARE_REVISION_BCM 17063000 diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index 744a05a63994..18bd5736f221 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -57,15 +57,6 @@ config ION_EXYNOS_STAT_LOG help Say Y if you need to see some stats info via debugfs -config ION_RBIN_HEAP - bool "ION RBIN Heap" - default y - depends on ION && RBIN && ION_EXYNOS - help - Provides ION RBIN heap which utilizes RBIN memory. 
- While RBIN heap is similar with CMA heap, it assumes that an ION buffer - can be composed of multiple free chunks. - if ION_EXYNOS source drivers/staging/android/ion/exynos/Kconfig endif diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index a84b437c9c31..5f2aa470c9f4 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ ion_carveout_heap.o ion_chunk_heap.o obj-$(CONFIG_DMA_CMA) += ion_cma_heap.o -obj-$(CONFIG_ION_RBIN_HEAP) += ion_rbin_heap.o obj-$(CONFIG_ION_TEST) += ion_test.o ifdef CONFIG_COMPAT obj-$(CONFIG_ION) += compat_ion.o diff --git a/drivers/staging/android/ion/exynos/exynos_ion.c b/drivers/staging/android/ion/exynos/exynos_ion.c index b7c9c7f5cf64..1815372166a4 100644 --- a/drivers/staging/android/ion/exynos/exynos_ion.c +++ b/drivers/staging/android/ion/exynos/exynos_ion.c @@ -42,7 +42,6 @@ struct exynos_ion_platform_heap { unsigned int compat_ids; bool secure; bool reusable; - bool recyclable; bool protected; bool noprot; atomic_t secure_ref; @@ -380,11 +379,6 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem) pdata = &plat_heaps[nr_heaps]; pdata->secure = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,secure", NULL); pdata->reusable = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,reusable", NULL); -#ifdef CONFIG_ION_RBIN_HEAP - pdata->recyclable = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,recyclable", NULL); -#else - pdata->recyclable = false; -#endif pdata->noprot = !!of_get_flat_dt_prop(rmem->fdt_node, "ion,noprot", NULL); prop = of_get_flat_dt_prop(rmem->fdt_node, "id", &len); @@ -431,14 +425,11 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem) else heap_data->align = be32_to_cpu(prop[0]); - if (pdata->reusable || pdata->recyclable) { + if (pdata->reusable) { int ret; struct cma *cma; - if (pdata->reusable) - heap_data->type = ION_HEAP_TYPE_DMA; - else - heap_data->type = ION_HEAP_TYPE_RBIN; + heap_data->type = ION_HEAP_TYPE_DMA; heap_data->priv = &pdata->dev; /* set as non-coherent device */ @@ -450,25 +441,12 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem) __func__, heap_data->name, ret); return ret; } - if (pdata->recyclable) { - cma_set_rbin(cma); - totalrbin_pages += (heap_data->size / PAGE_SIZE); - /* - * # of cma pages was increased by this RBIN memory in - * cma_init_reserved_mem_with_name(). Need to deduct. 
- */ - totalcma_pages -= (heap_data->size / PAGE_SIZE); - } dma_contiguous_early_fixup(heap_data->base, heap_data->size); dev_set_cma_area(&pdata->dev, cma); - if (pdata->reusable) - pr_info("CMA memory[%d]: %s:%#lx\n", heap_data->id, - heap_data->name, (unsigned long)rmem->size); - else - pr_info("rbin CMA memory[%d]: %s:%#lx\n", heap_data->id, + pr_info("CMA memory[%d]: %s:%#lx\n", heap_data->id, heap_data->name, (unsigned long)rmem->size); } else { heap_data->type = ION_HEAP_TYPE_CARVEOUT; diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index ff9e5eddce3a..afc0880137d7 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -406,7 +406,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct sg_table *table; struct scatterlist *sg; int i, ret; - long nr_alloc_cur, nr_alloc_peak; buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); if (!buffer) @@ -483,10 +482,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); - nr_alloc_cur = atomic_long_add_return(len, &heap->total_allocated); - nr_alloc_peak = atomic_long_read(&heap->total_allocated_peak); - if (nr_alloc_cur > nr_alloc_peak) - atomic_long_set(&heap->total_allocated_peak, nr_alloc_cur); return buffer; err: @@ -516,7 +511,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer) kfree(iovm_map); } - atomic_long_sub(buffer->size, &buffer->heap->total_allocated); buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->heap->ops->free(buffer); vfree(buffer->pages); @@ -557,9 +551,6 @@ static int ion_buffer_put(struct ion_buffer *buffer) static void ion_buffer_add_to_handle(struct ion_buffer *buffer) { mutex_lock(&buffer->lock); - if (buffer->handle_count == 0) - atomic_long_add(buffer->size, &buffer->heap->total_handles); - buffer->handle_count++; mutex_unlock(&buffer->lock); } @@ -584,7 +575,6 @@ static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); - atomic_long_sub(buffer->size, &buffer->heap->total_handles); } mutex_unlock(&buffer->lock); } @@ -2158,8 +2148,6 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) seq_printf(s, "%16s %16zu\n", "total orphaned", total_orphaned_size); seq_printf(s, "%16s %16zu\n", "total ", total_size); - seq_printf(s, "%16.s %16lu\n", "peak allocated", - atomic_long_read(&heap->total_allocated_peak)); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) seq_printf(s, "%16s %16zu\n", "deferred free", heap->free_list_size); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index ee531295b779..9cb2b2f05e43 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -347,11 +347,6 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) case ION_HEAP_TYPE_SYSTEM: heap = ion_system_heap_create(heap_data); break; -#ifdef CONFIG_ION_RBIN_HEAP - case ION_HEAP_TYPE_RBIN: - heap = ion_rbin_heap_create(heap_data); - break; -#endif case ION_HEAP_TYPE_CARVEOUT: heap = ion_carveout_heap_create(heap_data); break; @@ -396,11 +391,6 @@ void ion_heap_destroy(struct ion_heap *heap) case ION_HEAP_TYPE_SYSTEM: ion_system_heap_destroy(heap); break; -#ifdef CONFIG_ION_RBIN_HEAP - case ION_HEAP_TYPE_RBIN: - ion_rbin_heap_destroy(heap); - break; -#endif case ION_HEAP_TYPE_CARVEOUT: 
ion_carveout_heap_destroy(heap); break; diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 2ca71ebc99a7..2fe655d53474 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -50,11 +50,8 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool, static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) { #ifdef CONFIG_DEBUG_LIST -#ifdef CONFIG_RBIN - if (!is_ion_rbin_page(page)) -#endif - BUG_ON(page->lru.next != LIST_POISON1 || - page->lru.prev != LIST_POISON2); + BUG_ON(page->lru.next != LIST_POISON1 || + page->lru.prev != LIST_POISON2); #endif if (pool->cached) ion_clear_page_clean(page); @@ -71,7 +68,7 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) return 0; } -struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) { struct page *page; @@ -95,9 +92,6 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool) BUG_ON(!pool); - if (!pool->high_count && !pool->low_count) - goto done; - spin_lock(&pool->lock); if (pool->high_count) page = ion_page_pool_remove(pool, true); @@ -105,7 +99,6 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool) page = ion_page_pool_remove(pool, false); spin_unlock(&pool->lock); -done: return page; } @@ -113,12 +106,7 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) { int ret; - /* - * ION RBIN heap can utilize ion_page_pool_free() for pages which are - * not compound pages. Thus, comment out the below line. - * - * BUG_ON(pool->order != compound_order(page)); - */ + BUG_ON(pool->order != compound_order(page)); ret = ion_page_pool_add(pool, page); if (ret) @@ -130,7 +118,7 @@ void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page) ion_page_pool_free_pages(pool, page); } -int ion_page_pool_total(struct ion_page_pool *pool, bool high) +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) { int count = pool->low_count; diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index 4f19f3da7c42..7685c6067781 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h @@ -247,9 +247,6 @@ struct ion_heap { struct task_struct *task; int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); - atomic_long_t total_allocated; - atomic_long_t total_allocated_peak; - atomic_long_t total_handles; }; /** @@ -412,12 +409,6 @@ void ion_system_heap_destroy(struct ion_heap *); struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); void ion_system_contig_heap_destroy(struct ion_heap *); -#ifdef CONFIG_ION_RBIN_HEAP -struct ion_heap *ion_rbin_heap_create(struct ion_platform_heap *); -void ion_rbin_heap_destroy(struct ion_heap *); -bool is_ion_rbin_page(struct page *); -#endif - struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); void ion_carveout_heap_destroy(struct ion_heap *); @@ -500,11 +491,9 @@ struct ion_page_pool { struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); void ion_page_pool_destroy(struct ion_page_pool *); void *ion_page_pool_alloc_pages(struct ion_page_pool *pool); -struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high); struct page *ion_page_pool_alloc(struct ion_page_pool *); void ion_page_pool_free(struct ion_page_pool *, struct page *); void 
ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
-int ion_page_pool_total(struct ion_page_pool *pool, bool high);
 
 #ifdef CONFIG_ION_POOL_CACHE_POLICY
 static inline void ion_page_pool_alloc_set_cache_policy
diff --git a/drivers/staging/android/ion/ion_rbin_heap.c b/drivers/staging/android/ion/ion_rbin_heap.c
deleted file mode 100644
index 796c8752ecc8..000000000000
--- a/drivers/staging/android/ion/ion_rbin_heap.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_rbin_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "ion.h"
-#include "ion_priv.h"
-
-#define NUM_ORDERS ARRAY_SIZE(orders)
-
-static const unsigned int orders[] = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
-
-static int order_to_index(unsigned int order)
-{
-	int i;
-	for (i = 0; i < NUM_ORDERS; i++)
-		if (order == orders[i])
-			return i;
-	BUG();
-	return -1;
-}
-
-struct ion_rbin_heap {
-	struct ion_heap heap;
-	struct cma *cma;
-	unsigned long base_pfn;
-	unsigned long count;
-	wait_queue_head_t waitqueue;
-	struct task_struct *task;
-	bool prereclaim_run;
-	struct task_struct *task_shrink;
-	bool shrink_run;
-	struct ion_page_pool *pools[NUM_ORDERS];
-};
-
-static struct page *alloc_rbin_page(struct ion_rbin_heap *heap,
-				    struct ion_buffer *buffer,
-				    unsigned long size_org)
-{
-	unsigned long size = PAGE_ALIGN(size_org);
-	int order = get_order(size);
-	struct page *page = NULL;
-	void *addr;
-
-	trace_ion_rbin_partial_alloc_start(heap->heap.name, buffer, size, NULL);
-
-	if (size >= (MAX_ORDER_NR_PAGES << PAGE_SHIFT)) {
-		page = cma_alloc(heap->cma, MAX_ORDER_NR_PAGES, MAX_ORDER - 1);
-		if (page) {
-			size = MAX_ORDER_NR_PAGES << PAGE_SHIFT;
-			goto done;
-		}
-		order = MAX_ORDER - 2;
-	} else if (size < (PAGE_SIZE << order)) {
-		page = cma_alloc(heap->cma, size >> PAGE_SHIFT, 0);
-		if (page)
-			goto done;
-		order--;
-	}
-
-	for ( ; order >= 0; order--) {
-		page = cma_alloc(heap->cma, 1 << order, 0);
-		if (page) {
-			size = PAGE_SIZE << order;
-			goto done;
-		}
-	}
-
-done:
-	/* CAUTION!! : we assume page->private is not touched by anyone. */
-	if (page) {
-		addr = page_address(page);
-		/*
-		 * alloc_rbin_page() can be called by pre-reclaimer.
-		 * In that case, buffer will be NULL.
-		 * Let the pre-reclaimer zero the region in default.
-		 */
-		if (!buffer || !(buffer->flags & ION_FLAG_NOZEROED))
-			memset(addr, 0, size);
-		set_page_private(page, size);
-		__flush_dcache_area(addr, size);
-	}
-
-	trace_ion_rbin_partial_alloc_end(heap->heap.name, buffer,
-					 page ?
page_private(page) : 0, page); - return page; -} - -static void free_rbin_page(struct ion_rbin_heap *heap, - struct ion_buffer *buffer, struct page *page) -{ - unsigned int size = page_private(page); - - trace_ion_rbin_partial_free_start(heap->heap.name, buffer, size, page); - cma_release(heap->cma, page, (PAGE_ALIGN(size) >> PAGE_SHIFT)); - trace_ion_rbin_partial_free_end(heap->heap.name, buffer, size, NULL); -} - -static inline void do_expand(struct ion_rbin_heap *heap, struct page *page, - unsigned int nr_pages) -{ - unsigned int rem_nr_pages; - unsigned int order; - unsigned int total_nr_pages; - unsigned int free_nr_page; - struct page *free_page; - struct ion_page_pool *pool; - - total_nr_pages = page_private(page) >> PAGE_SHIFT; - rem_nr_pages = total_nr_pages - nr_pages; - free_page = page + total_nr_pages; - - while (rem_nr_pages) { - order = ilog2(rem_nr_pages); - free_nr_page = 1 << order; - free_page -= free_nr_page; - set_page_private(free_page, free_nr_page << PAGE_SHIFT); - pool = heap->pools[order_to_index(order)]; - ion_page_pool_free(pool, free_page); - rem_nr_pages -= free_nr_page; - } - set_page_private(page, nr_pages << PAGE_SHIFT); -} - -static struct page *alloc_rbin_page_from_pool(struct ion_rbin_heap *heap, - struct ion_buffer *buffer, - unsigned long size) -{ - struct page *page = NULL; - unsigned int size_order; - unsigned int nr_pages; - int i; - - trace_ion_rbin_pool_alloc_start(heap->heap.name, buffer, size, NULL); - size_order = get_order(size); - nr_pages = size >> PAGE_SHIFT; - - /* try the same or higher order */ - for (i = NUM_ORDERS - 1; i >= 0; i--) { - if (orders[i] < size_order) - continue; - page = ion_page_pool_alloc(heap->pools[i]); - if (!page) - continue; - if (nr_pages < (1 << orders[i])) - do_expand(heap, page, nr_pages); - atomic_sub(nr_pages, &rbin_pool_pages); - goto done; - } - - /* try lower order */ - for (i = 0; i < NUM_ORDERS; i++) { - if (orders[i] >= size_order) - continue; - page = ion_page_pool_alloc(heap->pools[i]); - if (!page) - continue; - atomic_sub(1 << orders[i], &rbin_pool_pages); - goto done; - } - -done: - trace_ion_rbin_pool_alloc_end(heap->heap.name, buffer, - page ? page_private(page) : 0, page); - return page; -} - -static int ion_rbin_heap_allocate(struct ion_heap *heap, - struct ion_buffer *buffer, - unsigned long size_org, unsigned long align, - unsigned long flags) -{ - struct ion_rbin_heap *rbin_heap = container_of(heap, - struct ion_rbin_heap, - heap); - struct sg_table *table; - struct scatterlist *sg; - struct list_head pages; - struct page *page, *tmp_page; - int i = 0; - unsigned long size, size_remaining; - unsigned int pagesize; - unsigned long nr_total; - long nr_alloc; - unsigned long from_pool_size = 0; - - /* actually does not support align like system heap or carveout heap */ - if (align > PAGE_SIZE) - return -EINVAL; - - size_remaining = size = PAGE_ALIGN(size_org); - nr_total = rbin_heap->count << PAGE_SHIFT; - - nr_alloc = atomic_read(&rbin_allocated_pages) << PAGE_SHIFT; - if (size > (nr_total - nr_alloc)) - return -ENOMEM; - nr_alloc = atomic_add_return(size >> PAGE_SHIFT, &rbin_allocated_pages); - if (nr_alloc > nr_total) { - atomic_sub(size >> PAGE_SHIFT, &rbin_allocated_pages); - return -ENOMEM; - } - - trace_printk("start. 
len %lu\n", size); - trace_ion_rbin_alloc_start(heap->name, buffer, size, NULL); - - INIT_LIST_HEAD(&pages); - while (size_remaining > 0) { - page = alloc_rbin_page_from_pool(rbin_heap, buffer, - size_remaining); - if (page) - from_pool_size += page_private(page); - else - page = alloc_rbin_page(rbin_heap, buffer, - size_remaining); - if (!page) - goto free_pages; - list_add_tail(&page->lru, &pages); - pagesize = page_private(page); - size_remaining -= pagesize; - i++; - } - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!table) - goto free_pages; - - if (sg_alloc_table(table, i, GFP_KERNEL)) - goto free_table; - - sg = table->sgl; - list_for_each_entry_safe(page, tmp_page, &pages, lru) { - unsigned int len; - - pagesize = page_private(page); - len = pagesize; - sg_set_page(sg, page, len, 0); - sg = sg_next(sg); - list_del(&page->lru); - } - - buffer->priv_virt = table; - buffer->sg_table = table; - trace_printk("end success %9lu %9lu\n", - from_pool_size, size - from_pool_size); - trace_ion_rbin_alloc_end(heap->name, buffer, size, NULL); - return 0; - -free_table: - kfree(table); -free_pages: - list_for_each_entry_safe(page, tmp_page, &pages, lru) - free_rbin_page(rbin_heap, buffer, page); - - atomic_sub(size >> PAGE_SHIFT, &rbin_allocated_pages); - trace_printk("end fail %ld %ld %lu\n", nr_total, nr_alloc, size); - trace_ion_rbin_alloc_end(heap->name, buffer, size, (void *)-1UL); - return -ENOMEM; -} - -static void ion_rbin_heap_free(struct ion_buffer *buffer) -{ - struct ion_rbin_heap *rbin_heap = container_of(buffer->heap, - struct ion_rbin_heap, - heap); - struct sg_table *table = buffer->sg_table; - struct scatterlist *sg; - unsigned long size = PAGE_ALIGN(buffer->size); - int i; - - trace_ion_rbin_free_start(buffer->heap->name, buffer, buffer->size, NULL); - for_each_sg(table->sgl, sg, table->nents, i) - free_rbin_page(rbin_heap, buffer, sg_page(sg)); - sg_free_table(table); - kfree(table); - atomic_sub(size >> PAGE_SHIFT, &rbin_allocated_pages); - trace_ion_rbin_free_end(buffer->heap->name, buffer, buffer->size, NULL); -} - -static struct sg_table *ion_rbin_heap_map_dma(struct ion_heap *heap, - struct ion_buffer *buffer) -{ - return buffer->priv_virt; -} - -static void ion_rbin_heap_unmap_dma(struct ion_heap *heap, - struct ion_buffer *buffer) -{ -} - -static struct ion_heap_ops rbin_heap_ops = { - .allocate = ion_rbin_heap_allocate, - .free = ion_rbin_heap_free, - .map_dma = ion_rbin_heap_map_dma, - .unmap_dma = ion_rbin_heap_unmap_dma, - .map_kernel = ion_heap_map_kernel, - .unmap_kernel = ion_heap_unmap_kernel, - .map_user = ion_heap_map_user, -}; - -static int ion_rbin_heap_debug_show(struct ion_heap *heap, struct seq_file *s, - void *unused) -{ - struct ion_rbin_heap *rbin_heap = container_of(heap, - struct ion_rbin_heap, - heap); - int i; - struct ion_page_pool *pool; - unsigned long total = 0; - - if (!s) - return -EINVAL; - - for (i = 0; i < NUM_ORDERS; i++) { - pool = rbin_heap->pools[i]; - - total += (1 << pool->order) * PAGE_SIZE * pool->high_count; - total += (1 << pool->order) * PAGE_SIZE * pool->low_count; - seq_printf(s, "%d order %u highmem pages uncached %lu total\n", - pool->high_count, pool->order, - (PAGE_SIZE << pool->order) * pool->high_count); - seq_printf(s, "%d order %u lowmem pages uncached %lu total\n", - pool->low_count, pool->order, - (PAGE_SIZE << pool->order) * pool->low_count); - } - seq_puts(s, "--------------------------------------------\n"); - seq_printf(s, "total pool %lu Bytes ( %ld.%06ld MB ) \n", total, - total >> 20, total % (1 
<< 20)); - return 0; -} - -//TODO: currently, we assume there is only one rbin.. -static struct ion_rbin_heap *rbin_heap; - -bool is_ion_rbin_page(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - return (rbin_heap->base_pfn <= pfn) - && (pfn < rbin_heap->base_pfn + rbin_heap->count); -} - -void wake_ion_rbin_heap_prereclaim(void) -{ - if (rbin_heap) { - rbin_heap->prereclaim_run = 1; - wake_up(&rbin_heap->waitqueue); - } -} - -void wake_ion_rbin_heap_shrink(void) -{ - if (rbin_heap) { - rbin_heap->shrink_run = 1; - wake_up(&rbin_heap->waitqueue); - } -} - -static int ion_rbin_heap_prereclaim(void *data) -{ - struct ion_rbin_heap *heap = data; - struct page *page; - unsigned long totalsize; - unsigned long pagesize; - unsigned int max_pool_order = orders[0]; - unsigned int order; - struct ion_page_pool *pool; - - if (!heap || !heap->cma) - return -EINVAL; - - while (true) { - wait_event_freezable(heap->waitqueue, - heap->prereclaim_run); - - trace_printk("start\n"); - reclaim_contig_migrate_range(heap->base_pfn, - heap->base_pfn + heap->count, 0); - totalsize = 0; - while (true) { - page = alloc_rbin_page(heap, NULL, - PAGE_SIZE << max_pool_order); - if (!page) - break; - pagesize = page_private(page); - totalsize += pagesize; - order = get_order(pagesize); - pool = heap->pools[order_to_index(order)]; - ion_page_pool_free(pool, page); - atomic_add(1 << order, &rbin_pool_pages); - } - trace_printk("end %lu\n", totalsize); - heap->prereclaim_run = 0; - } - - return 0; -} - -static int ion_page_pool_shrink_cma(struct cma *cma, struct ion_page_pool *pool, - int nr_to_scan) -{ - int freed = 0; - - if (nr_to_scan == 0) - return ion_page_pool_total(pool, 1); - - while (freed < nr_to_scan) { - struct page *page; - int page_count = 1 << pool->order; - - spin_lock(&pool->lock); - if (pool->low_count) { - page = ion_page_pool_remove(pool, false); - } else if (pool->high_count) { - page = ion_page_pool_remove(pool, true); - } else { - spin_unlock(&pool->lock); - break; - } - spin_unlock(&pool->lock); - cma_release(cma, page, page_count); - freed += page_count; - } - - return freed; -} - -static int ion_rbin_heap_shrink_all(void *data) -{ - struct ion_rbin_heap *heap = data; - struct ion_page_pool *pool; - int nr_scan, nr_freed; - unsigned long total_freed; - int i; - - if (!heap || !heap->cma) - return -EINVAL; - - while (true) { - wait_event_freezable(heap->waitqueue, - heap->shrink_run); - - trace_printk("start\n"); - total_freed = 0; - for (i = 0; i < NUM_ORDERS; i++) { - pool = heap->pools[i]; - nr_scan = ion_page_pool_shrink_cma(heap->cma, pool, 0); - if (nr_scan) { - nr_freed = ion_page_pool_shrink_cma(heap->cma, - pool, nr_scan); - atomic_sub(nr_freed, &rbin_pool_pages); - total_freed += nr_freed; - } - } - heap->shrink_run = 0; - trace_printk("%lu\n", total_freed); - } - - return 0; -} - -static void ion_rbin_heap_destroy_pools(struct ion_page_pool **pools) -{ - int i; - - for (i = 0; i < NUM_ORDERS; i++) - if (pools[i]) - ion_page_pool_destroy(pools[i]); -} - -static int ion_rbin_heap_create_pools(struct ion_page_pool **pools) -{ - int i; - for (i = 0; i < NUM_ORDERS; i++) { - struct ion_page_pool *pool; - - pool = ion_page_pool_create(GFP_KERNEL, orders[i]); - if (!pool) - goto err_create_pool; - pools[i] = pool; - } - return 0; - -err_create_pool: - ion_rbin_heap_destroy_pools(pools); - return -ENOMEM; -} - -/* - * Affinity setting of RBIN pre-reclaim thread on big cores. - * While the big core set varies depending on chipsets, - * Exynos9810 uses CPU4~7. 
- * (By the way, is there an API detecting big cores?) - */ -#define BIG_CORE_NUM_FIRST 4 -#define BIG_CORE_NUM_LAST 7 - -static int ion_rbin_heap_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - int i; - struct cpumask cpu_mask; - - if (!rbin_heap) - return NOTIFY_OK; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - case CPU_DEAD: - case CPU_UP_CANCELED: - cpumask_clear(&cpu_mask); - for (i = BIG_CORE_NUM_FIRST; i <= BIG_CORE_NUM_LAST; i++) - cpumask_set_cpu(i, &cpu_mask); - if (cpumask_any_and(cpu_online_mask, &cpu_mask) >= nr_cpu_ids) - cpumask_setall(&cpu_mask); - set_cpus_allowed_ptr(rbin_heap->task, &cpu_mask); - } - - return NOTIFY_OK; -} - -struct ion_heap *ion_rbin_heap_create(struct ion_platform_heap *data) -{ - struct ion_rbin_heap *heap; - struct sched_param param = { .sched_priority = 0 }; - - heap = kzalloc(sizeof(struct ion_rbin_heap), GFP_KERNEL); - if (!heap) - return ERR_PTR(-ENOMEM); - heap->heap.ops = &rbin_heap_ops; - heap->heap.type = ION_HEAP_TYPE_RBIN; - - heap->cma = dev_get_cma_area((struct device *)data->priv); - if (heap->cma) { - heap->base_pfn = PHYS_PFN(cma_get_base(heap->cma)); - heap->count = cma_get_size(heap->cma) >> PAGE_SHIFT; - } - - if (ion_rbin_heap_create_pools(heap->pools)) - goto error_create_pools; - heap->heap.debug_show = ion_rbin_heap_debug_show; - - init_waitqueue_head(&heap->waitqueue); - heap->task = kthread_run(ion_rbin_heap_prereclaim, heap, - "%s", "rbin"); - heap->task_shrink = kthread_run(ion_rbin_heap_shrink_all, heap, - "%s", "rbin_shrink"); - rbin_heap = heap; - - sched_setscheduler(heap->task, SCHED_NORMAL, ¶m); - ion_rbin_heap_cpu_callback(NULL, CPU_UP_PREPARE, NULL); - hotcpu_notifier(ion_rbin_heap_cpu_callback, 0); - - return &heap->heap; - -error_create_pools: - kfree(heap); - return ERR_PTR(-ENOMEM); -} - -void ion_rbin_heap_destroy(struct ion_heap *heap) -{ - struct ion_rbin_heap *rbin_heap = container_of(heap, - struct ion_rbin_heap, - heap); - kfree(rbin_heap); -} - diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 808ef3474af9..0cfa2562ab40 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -92,67 +91,6 @@ static int test_task_flag(struct task_struct *p, int flag) return 0; } -static void show_memory(void) -{ - unsigned long nr_rbin_free, nr_rbin_pool, nr_rbin_alloc, nr_rbin_file; - - nr_rbin_free = global_page_state(NR_FREE_RBIN_PAGES); - nr_rbin_pool = atomic_read(&rbin_pool_pages); - nr_rbin_alloc = atomic_read(&rbin_allocated_pages); - nr_rbin_file = totalrbin_pages - nr_rbin_free - nr_rbin_pool - - nr_rbin_alloc; - -#define K(x) ((x) << (PAGE_SHIFT - 10)) - printk("Mem-Info:" - " totalram_pages:%lukB" - " free:%lukB" - " active_anon:%lukB" - " inactive_anon:%lukB" - " active_file:%lukB" - " inactive_file:%lukB" - " unevictable:%lukB" - " isolated(anon):%lukB" - " isolated(file):%lukB" - " dirty:%lukB" - " writeback:%lukB" - " mapped:%lukB" - " shmem:%lukB" - " slab_reclaimable:%lukB" - " slab_unreclaimable:%lukB" - " kernel_stack:%lukB" - " pagetables:%lukB" - " free_cma:%lukB" - " rbin_free:%lukB" - " rbin_pool:%lukB" - " rbin_alloc:%lukB" - " rbin_file:%lukB" - "\n", - K(totalram_pages), - K(global_page_state(NR_FREE_PAGES)), - K(global_page_state(NR_ACTIVE_ANON)), - K(global_page_state(NR_INACTIVE_ANON)), - K(global_page_state(NR_ACTIVE_FILE)), - 
K(global_page_state(NR_INACTIVE_FILE)), - K(global_page_state(NR_UNEVICTABLE)), - K(global_page_state(NR_ISOLATED_ANON)), - K(global_page_state(NR_ISOLATED_FILE)), - K(global_page_state(NR_FILE_DIRTY)), - K(global_page_state(NR_WRITEBACK)), - K(global_page_state(NR_FILE_MAPPED)), - K(global_page_state(NR_SHMEM)), - K(global_page_state(NR_SLAB_RECLAIMABLE)), - K(global_page_state(NR_SLAB_UNRECLAIMABLE)), - K(global_page_state(NR_KERNEL_STACK)), - K(global_page_state(NR_PAGETABLE)), - K(global_page_state(NR_FREE_CMA_PAGES)), - K(nr_rbin_free), - K(nr_rbin_pool), - K(nr_rbin_alloc), - K(nr_rbin_file) - ); -#undef K -} - static unsigned long lowmem_count(struct shrinker *s, struct shrink_control *sc) { @@ -179,17 +117,6 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) global_page_state(NR_SHMEM) - global_page_state(NR_UNEVICTABLE) - total_swapcache_pages(); - unsigned long nr_rbin_free, nr_rbin_pool, nr_rbin_alloc, nr_rbin_file; - - if ((sc->gfp_mask & __GFP_RBIN) != __GFP_RBIN) { - nr_rbin_free = global_page_state(NR_FREE_RBIN_PAGES); - nr_rbin_pool = atomic_read(&rbin_pool_pages); - nr_rbin_alloc = atomic_read(&rbin_allocated_pages); - nr_rbin_file = totalrbin_pages - nr_rbin_free - nr_rbin_pool - - nr_rbin_alloc; - other_free -= nr_rbin_free; - other_file -= nr_rbin_file; - } if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; @@ -302,7 +229,6 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) cache_size, cache_limit, min_score_adj, free); - show_memory(); lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; lowmem_lmkcount++; diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h index 3c87e106236c..fa1a68f9d086 100644 --- a/drivers/staging/android/uapi/ion.h +++ b/drivers/staging/android/uapi/ion.h @@ -44,15 +44,11 @@ enum ion_heap_type { * must be last so device specific heaps always * are at the end of this enum */ - ION_HEAP_TYPE_CUSTOM2, ION_NUM_HEAPS = 16, }; /* Exynos specific ION heap types */ #define ION_HEAP_TYPE_HPA ION_HEAP_TYPE_CUSTOM -/* Samsung specific ION heap types */ -#define ION_HEAP_TYPE_RBIN ION_HEAP_TYPE_CUSTOM2 - #define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) #define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) #define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 40e319790ac0..771a3ad5fb61 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -546,20 +546,15 @@ static int acm_notify_serial_state(struct f_acm *acm) spin_lock_irqsave(&acm->lock, flags); - if (acm->notify->enabled) { - if (acm->notify_req) { - dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", - acm->port_num, acm->serial_state); - serial_state = cpu_to_le16(acm->serial_state); - status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, + if (acm->notify_req) { + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", + acm->port_num, acm->serial_state); + serial_state = cpu_to_le16(acm->serial_state); + status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, 0, &serial_state, sizeof(acm->serial_state)); - } else { - acm->pending = true; - status = 0; - } } else { - status = -EAGAIN; - printk(KERN_DEBUG "usb: %s acm function already disabled\n", __func__); + acm->pending = true; + status = 0; } spin_unlock_irqrestore(&acm->lock, flags); return status; @@ -587,9 +582,9 @@ static void 
acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) #ifdef CONFIG_USB_DUN_SUPPORT int acm_notify(void *dev, u16 state) { - struct f_acm *acm = (struct f_acm *)dev; - - if (acm && acm->notify->enabled) { + struct f_acm *acm; + if (dev) { + acm = (struct f_acm *)dev; acm->serial_state = state; acm_notify_serial_state(acm); } else { diff --git a/drivers/usb/gadget/function/f_conn_gadget.c b/drivers/usb/gadget/function/f_conn_gadget.c index d82e79686d92..79689c1879da 100644 --- a/drivers/usb/gadget/function/f_conn_gadget.c +++ b/drivers/usb/gadget/function/f_conn_gadget.c @@ -59,6 +59,7 @@ #include #include #include +#include /* platform specific definitions */ /* ex) #define __ANDROID__ */ @@ -122,6 +123,8 @@ struct conn_gadget_dev { /* flag variable that save flush call status * to check wakeup reason */ atomic_t flush; + + struct kref kref; }; static struct usb_interface_descriptor conn_gadget_interface_desc = { @@ -220,6 +223,7 @@ struct conn_gadget_instance { const char *name; }; +static void conn_gadget_cleanup(struct kref *kref); static inline struct conn_gadget_dev *func_to_conn_gadget(struct usb_function *f) { @@ -682,6 +686,11 @@ static int conn_gadget_open(struct inode *ip, struct file *fp) return -EBUSY; } + if (!kref_get_unless_zero(&_conn_gadget_dev->kref)) { + CONN_GADGET_ERR("already device removed\n"); + return -ENODEV; + } + fp->private_data = _conn_gadget_dev; /* clear the error latch */ @@ -742,6 +751,8 @@ static int conn_gadget_release(struct inode *ip, struct file *fp) atomic_set(&_conn_gadget_dev->flush, 0); conn_gadget_unlock(&_conn_gadget_dev->open_excl); + + kref_put(&_conn_gadget_dev->kref, conn_gadget_cleanup); return 0; } @@ -1207,6 +1218,8 @@ static int conn_gadget_setup(struct conn_gadget_instance *fi_conn_gadget) atomic_set(&dev->write_excl, 0); atomic_set(&dev->flush, 0); + kref_init(&dev->kref); + INIT_LIST_HEAD(&dev->tx_idle); INIT_LIST_HEAD(&dev->rx_idle); INIT_LIST_HEAD(&dev->rx_busy); @@ -1255,7 +1268,7 @@ err_: return ret; } -static void conn_gadget_cleanup(void) +static void conn_gadget_cleanup(struct kref *kref) { printk(KERN_INFO "conn_gadget_cleanup\n"); @@ -1331,8 +1344,8 @@ static void conn_gadget_free_inst(struct usb_function_instance *fi) fi_conn_gadget = to_fi_conn_gadget(fi); kfree(fi_conn_gadget->name); - conn_gadget_cleanup(); kfree(fi_conn_gadget); + kref_put(&_conn_gadget_dev->kref, conn_gadget_cleanup); } struct usb_function_instance *alloc_inst_conn_gadget(void) diff --git a/fs/exec.c b/fs/exec.c index 59c819790efe..97d04340b659 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -73,6 +73,9 @@ #ifdef CONFIG_RKP_KDP #define rkp_is_nonroot(x) ((x->cred->type)>>1 & 1) +#ifdef CONFIG_LOD_SEC +#define rkp_is_lod(x) ((x->cred->type)>>3 & 1) +#endif #endif /*CONFIG_RKP_KDP*/ int suid_dumpable = 0; @@ -1624,6 +1627,14 @@ static int rkp_restrict_fork(struct filename *path) if(!strcmp(path->name,"/system/bin/patchoat")){ return 0 ; } +/* If the Process is from Linux on Dex, +then no need to reduce privilege */ +#ifdef CONFIG_LOD_SEC + if(rkp_is_lod(current)){ + return 0; + } +#endif + if(rkp_is_nonroot(current)){ shellcred = prepare_creds(); if (!shellcred) { @@ -1974,7 +1985,6 @@ SYSCALL_DEFINE3(execve, const char __user *const __user *, argv, const char __user *const __user *, envp) { -#if defined CONFIG_SEC_RESTRICT_FORK #ifdef CONFIG_RKP_KDP struct filename *path = getname(filename); int error = PTR_ERR(path); @@ -1986,6 +1996,7 @@ SYSCALL_DEFINE3(execve, rkp_call(RKP_CMDID(0x4b),(u64)path->name,0,0,0,0); } #endif +#if defined 
CONFIG_SEC_RESTRICT_FORK if(CHECK_ROOT_UID(current)){ if(sec_restrict_fork()){ PRINT_LOG("Restricted making process. PID = %d(%s) " @@ -2009,10 +2020,11 @@ SYSCALL_DEFINE3(execve, return -EACCES; } } +#endif +#endif // End of CONFIG_SEC_RESTRICT_FORK +#ifdef CONFIG_RKP_KDP putname(path); #endif -#endif // End of CONFIG_SEC_RESTRICT_FORK - return do_execve(getname(filename), argv, envp); } diff --git a/fs/inode.c b/fs/inode.c index 39e9e915f884..5f90db4124fc 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -168,15 +168,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) mapping->host = inode; mapping->flags = 0; atomic_set(&mapping->i_mmap_writable, 0); -#ifdef CONFIG_RBIN - if ((sb->s_flags & MS_RDONLY) && !shmem_mapping(mapping)) - mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE | - __GFP_RBIN); - else - mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); -#else mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); -#endif mapping->private_data = NULL; mapping->writeback_index = 0; #if defined(CONFIG_FMP_ECRYPT_FS) || defined(CONFIG_FMP_EXT4CRYPT_FS) diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index b16fad3e627e..9155a5a0d3b9 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -108,12 +108,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifndef CONFIG_MMU "MmapCopy: %8lu kB\n" -#endif -#ifdef CONFIG_RBIN - "RbinTotal: %8lu kB\n" - "RbinAllocated: %8d kB\n" - "RbinPool: %8d kB\n" - "RbinFree: %8lu kB\n" #endif "SwapTotal: %8lu kB\n" "SwapFree: %8lu kB\n" @@ -171,12 +165,6 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifndef CONFIG_MMU K((unsigned long) atomic_long_read(&mmap_pages_allocated)), -#endif -#ifdef CONFIG_RBIN - K(totalrbin_pages), - K(atomic_read(&rbin_allocated_pages) + atomic_read(&rbin_pool_pages)), - K(atomic_read(&rbin_pool_pages)), - K(global_page_state(NR_FREE_RBIN_PAGES)), #endif K(i.totalswap), K(i.freeswap), diff --git a/include/linux/cma.h b/include/linux/cma.h index 1f7e74fc596f..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -16,9 +16,6 @@ struct cma; extern unsigned long totalcma_pages; -extern unsigned long totalrbin_pages; -extern atomic_t rbin_allocated_pages; -extern atomic_t rbin_pool_pages; extern phys_addr_t cma_get_base(const struct cma *cma); extern unsigned long cma_get_size(const struct cma *cma); @@ -26,11 +23,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, struct cma **res_cma); -#ifdef CONFIG_RBIN -extern void cma_set_rbin(struct cma *cma); -#else -static inline void cma_set_rbin(struct cma *cma) {} -#endif extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 10664116aeb2..f3762789d350 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -37,7 +37,6 @@ struct vm_area_struct; #define ___GFP_WRITE 0x1000000u #define ___GFP_KSWAPD_RECLAIM 0x2000000u #define ___GFP_CMA 0x4000000u -#define ___GFP_RBIN 0x8000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -52,7 +51,6 @@ struct vm_area_struct; #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ #define __GFP_CMA ((__force gfp_t)___GFP_CMA) /* Allocate from CMA */ -#define __GFP_RBIN ((__force gfp_t)___GFP_RBIN) /* Allocate from RBIN */ #define 
GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) /* @@ -186,7 +184,7 @@ struct vm_area_struct; #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT 28 +#define __GFP_BITS_SHIFT 27 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* @@ -552,7 +550,8 @@ extern int alloc_contig_range_fast(unsigned long start, unsigned long end, extern void free_contig_range(unsigned long pfn, unsigned nr_pages); /* CMA stuff */ -extern void init_cma_reserved_pageblock(struct page *page, bool is_rbin); +extern void init_cma_reserved_pageblock(struct page *page); + #endif #ifdef CONFIG_HPA diff --git a/include/linux/mfd/max77865-private.h b/include/linux/mfd/max77865-private.h index c740c249daa5..3c0b501e03a7 100644 --- a/include/linux/mfd/max77865-private.h +++ b/include/linux/mfd/max77865-private.h @@ -115,13 +115,11 @@ enum max77865_fuelgauge_reg { ICHGTERM_REG = 0x1E, REMCAP_AV_REG = 0x1F, FULLCAP_NOM_REG = 0x23, - LEARN_CFG_REG = 0x28, FILTER_CFG_REG = 0x29, MISCCFG_REG = 0x2B, QRTABLE20_REG = 0x32, FULLCAP_REP_REG = 0x35, RCOMP_REG = 0x38, - TEMPCO_REG = 0x39, VEMPTY_REG = 0x3A, FSTAT_REG = 0x3D, DISCHARGE_THRESHOLD_REG = 0x40, diff --git a/include/linux/mm.h b/include/linux/mm.h index 1c816ace5dd1..83c8032d4680 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1829,13 +1829,6 @@ extern void zone_pcp_reset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; -/* vmscan.c */ -extern void reclaim_contig_migrate_range(unsigned long start, - unsigned long end, bool drain); -/* ion_rbin_heap.c */ -void wake_ion_rbin_heap_prereclaim(void); -void wake_ion_rbin_heap_shrink(void); - /* nommu.c */ extern atomic_long_t mmap_pages_allocated; extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 24470dbc9564..300d5ae893b4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -56,17 +56,6 @@ enum { * a single pageblock. */ MIGRATE_CMA, -#ifdef CONFIG_RBIN - /* - * MIGRATE_RBIN migration type differs from MIGRATE_CMA as - * it is designed to contain specific types of pages. - * For example, allowing only clean file pages may accelerate - * and increases the success rate of CMA allocations. - * Only page allocations with __GFP_RBIN can take MIGRATE_RBIN - * free pages. - */ - MIGRATE_RBIN, -#endif #endif #ifdef CONFIG_MEMORY_ISOLATION MIGRATE_ISOLATE, /* can't allocate from here */ @@ -76,36 +65,10 @@ enum { #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) -# define is_migrate_cma_page(_page) \ - (get_pageblock_migratetype(_page) == MIGRATE_CMA) -#ifdef CONFIG_RBIN -# define is_migrate_rbin(migratetype) unlikely((migratetype) == MIGRATE_RBIN) -# define is_migrate_rbin_nolikely(migratetype) ((migratetype) == MIGRATE_RBIN) -# define is_migrate_rbin_page(_page) \ - (get_pageblock_migratetype(_page) == MIGRATE_RBIN) -# define migratetype_rbin_or_cma(a) (a ? 
MIGRATE_RBIN : MIGRATE_CMA) -# define is_migrate_cma_rbin(migratetype) \ - ((unlikely((migratetype) == MIGRATE_CMA)) || \ - (unlikely((migratetype) == MIGRATE_RBIN))) -# define is_migrate_cma_rbin_page(_page) \ - ((get_pageblock_migratetype(_page) == MIGRATE_CMA) || \ - (get_pageblock_migratetype(_page) == MIGRATE_RBIN)) -#else -# define is_migrate_rbin(migratetype) false -# define is_migrate_rbin_nolikely(migratetype) false -# define is_migrate_rbin_page(_page) false -# define migratetype_rbin_or_cma(a) MIGRATE_CMA -# define is_migrate_cma_rbin(migratetype) is_migrate_cma(migratetype) -# define is_migrate_cma_rbin_page(_page) is_migrate_cma_page(_page) -#endif +# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) #else # define is_migrate_cma(migratetype) false # define is_migrate_cma_page(_page) false -# define is_migrate_rbin(migratetype) false -# define is_migrate_rbin_nolikely(migratetype) false -# define is_migrate_rbin_page(_page) false -# define is_migrate_cma_rbin(migratetype) false -# define is_migrate_cma_rbin_page(_page) false #endif #define for_each_migratetype_order(order, type) \ @@ -197,7 +160,6 @@ enum zone_stat_item { WORKINGSET_NODERECLAIM, NR_ANON_TRANSPARENT_HUGEPAGES, NR_FREE_CMA_PAGES, - NR_FREE_RBIN_PAGES, NR_VM_ZONE_STAT_ITEMS }; /* @@ -406,9 +368,6 @@ struct zone { * considered dirtyable memory. */ unsigned long dirty_balance_reserve; -#ifdef CONFIG_RBIN - atomic_t rbin_alloc; -#endif #ifndef CONFIG_SPARSEMEM /* diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 08906ecbac12..73fae8c4a5fb 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -262,8 +262,6 @@ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); if (is_migrate_cma(migratetype)) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); - else if (is_migrate_rbin(migratetype)) - __mod_zone_page_state(zone, NR_FREE_RBIN_PAGES, nr_pages); } extern const char * const vmstat_text[]; diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index f459295ac260..dde6bf092c8a 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h @@ -38,7 +38,6 @@ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "GFP_DIRECT_RECLAIM"}, \ {(unsigned long)__GFP_KSWAPD_RECLAIM, "GFP_KSWAPD_RECLAIM"}, \ - {(unsigned long)__GFP_RBIN, "GFP_RBIN"}, \ {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ ) : "GFP_NOWAIT" diff --git a/include/trace/events/ion.h b/include/trace/events/ion.h index de0d1d8e4fbc..6e4dfb168639 100644 --- a/include/trace/events/ion.h +++ b/include/trace/events/ion.h @@ -271,108 +271,6 @@ DEFINE_EVENT(ion_sync, ion_sync_end, TP_ARGS(caller, dev, dir, size, vaddr, offset, flush_all) ); -DECLARE_EVENT_CLASS(ion_rbin, - - TP_PROTO(const char *heap_name, - void *buffer, - unsigned long size, - void *page), - TP_ARGS(heap_name, buffer, size, page), - - TP_STRUCT__entry( - __field( const char *, heap_name) - __field( void *, buffer ) - __field( unsigned long, size ) - __field( void *, page ) - ), - - TP_fast_assign( - __entry->heap_name = heap_name; - __entry->buffer = buffer; - __entry->size = size; - __entry->page = page; - ), - - TP_printk("heap_name=%s buffer=%p size=%lu page=%p", - __entry->heap_name, - __entry->buffer, - __entry->size, - __entry->page - ) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_alloc_start, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void 
*page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_alloc_end, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_free_start, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_free_end, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_partial_alloc_start, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_partial_alloc_end, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_partial_free_start, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_partial_free_end, - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - - -DEFINE_EVENT(ion_rbin, ion_rbin_pool_alloc_start, - - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); - -DEFINE_EVENT(ion_rbin, ion_rbin_pool_alloc_end, - - TP_PROTO(const char *heap_name, void *buffer, unsigned long size, - void *page), - - TP_ARGS(heap_name, buffer, size, page) -); #endif /* _TRACE_ION_H */ /* This part must be outside protection */ diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index bc26f4d6186d..3f909e5bb458 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -338,8 +338,6 @@ #define KEY_WAKEUP_UNLOCK 253 /* Wake-up to recent view, ex: AOP */ #define KEY_RECENT 254 - -#define KEY_INT_CANCEL 0x2be /* for touch event skip */ #define KEY_WINK 0x2bf /* Intelligence Key */ /* Code 255 is reserved for special needs of AT keyboard driver */ diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5e2cd1030702..5a448b4aaefa 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -663,6 +663,10 @@ static int ptrace_peek_siginfo(struct task_struct *child, if (arg.nr < 0) return -EINVAL; + /* Ensure arg.off fits in an unsigned long */ + if (arg.off > ULONG_MAX) + return 0; + if (arg.flags & PTRACE_PEEKSIGINFO_SHARED) pending = &child->signal->shared_pending; else @@ -670,18 +674,20 @@ static int ptrace_peek_siginfo(struct task_struct *child, for (i = 0; i < arg.nr; ) { siginfo_t info; - s32 off = arg.off + i; + unsigned long off = arg.off + i; + bool found = false; spin_lock_irq(&child->sighand->siglock); list_for_each_entry(q, &pending->list, list) { if (!off--) { + found = true; copy_siginfo(&info, &q->info); break; } } spin_unlock_irq(&child->sighand->siglock); - if (off >= 0) /* beyond the end of the list */ + if (!found) /* beyond the end of the list */ break; #ifdef CONFIG_COMPAT diff --git a/kernel/sys.c b/kernel/sys.c index 1b835f4d1f96..4cafce53cb06 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -392,17 +392,21 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) int retval; kgid_t krgid, kegid; + krgid = make_kgid(ns, rgid); + kegid = make_kgid(ns, egid); + + if ((rgid != (gid_t) -1) && 
!gid_valid(krgid)) + return -EINVAL; + if ((egid != (gid_t) -1) && !gid_valid(kegid)) + return -EINVAL; + #if defined CONFIG_SEC_RESTRICT_SETUID - if(rgid == 0 || egid == 0) - { - if(sec_restrict_uid()) - return -EACCES; + if (krgid.val == 0 || kegid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; } #endif // End of CONFIG_SEC_RESTRICT_SETUID - krgid = make_kgid(ns, rgid); - kegid = make_kgid(ns, egid); - #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!gid_is_LOD(krgid.val)) @@ -413,11 +417,6 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) } #endif - if ((rgid != (gid_t) -1) && !gid_valid(krgid)) - return -EINVAL; - if ((egid != (gid_t) -1) && !gid_valid(kegid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -467,16 +466,17 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) int retval; kgid_t kgid; + kgid = make_kgid(ns, gid); + if (!gid_valid(kgid)) + return -EINVAL; + #if defined CONFIG_SEC_RESTRICT_SETUID - if(gid == 0) - { - if(sec_restrict_uid()) - return -EACCES; + if (kgid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; } #endif // End of CONFIG_SEC_RESTRICT_SETUID - kgid = make_kgid(ns, gid); - #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!gid_is_LOD(kgid.val)) @@ -484,9 +484,6 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) } #endif - if (!gid_valid(kgid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -559,17 +556,21 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) int retval; kuid_t kruid, keuid; + kruid = make_kuid(ns, ruid); + keuid = make_kuid(ns, euid); + + if ((ruid != (uid_t) -1) && !uid_valid(kruid)) + return -EINVAL; + if ((euid != (uid_t) -1) && !uid_valid(keuid)) + return -EINVAL; + #if defined CONFIG_SEC_RESTRICT_SETUID - if(ruid == 0 || euid == 0) - { - if(sec_restrict_uid()) - return -EACCES; + if (kruid.val == 0 || keuid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; } #endif // End of CONFIG_SEC_RESTRICT_SETUID - kruid = make_kuid(ns, ruid); - keuid = make_kuid(ns, euid); - #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!uid_is_LOD(kruid.val)) @@ -580,11 +581,6 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) } #endif - if ((ruid != (uid_t) -1) && !uid_valid(kruid)) - return -EINVAL; - if ((euid != (uid_t) -1) && !uid_valid(keuid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -648,16 +644,17 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) int retval; kuid_t kuid; + kuid = make_kuid(ns, uid); + if (!uid_valid(kuid)) + return -EINVAL; + #if defined CONFIG_SEC_RESTRICT_SETUID - if(uid == 0) - { - if(sec_restrict_uid()) - return -EACCES; + if (kuid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; } #endif // End of CONFIG_SEC_RESTRICT_SETUID - kuid = make_kuid(ns, uid); - #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!uid_is_LOD(kuid.val)) @@ -665,9 +662,6 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) } #endif - if (!uid_valid(kuid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -711,18 +705,26 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) int retval; kuid_t kruid, keuid, ksuid; -#if defined CONFIG_SEC_RESTRICT_SETUID - if(ruid == 0 || euid == 0 || suid == 0) - { - if(sec_restrict_uid()) - return -EACCES; - } -#endif // End of CONFIG_SEC_RESTRICT_SETUID - kruid = make_kuid(ns, ruid); keuid = make_kuid(ns, euid); ksuid = make_kuid(ns, suid); + if ((ruid != (uid_t) -1) && !uid_valid(kruid)) + return -EINVAL; + + if ((euid != (uid_t) -1) && !uid_valid(keuid)) + return -EINVAL; + + if ((suid != (uid_t) -1) && 
!uid_valid(ksuid)) + return -EINVAL; + +#if defined CONFIG_SEC_RESTRICT_SETUID + if (kruid.val == 0 || keuid.val == 0 || ksuid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; + } +#endif // End of CONFIG_SEC_RESTRICT_SETUID + #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!uid_is_LOD(kruid.val)) @@ -736,15 +738,6 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) } #endif - if ((ruid != (uid_t) -1) && !uid_valid(kruid)) - return -EINVAL; - - if ((euid != (uid_t) -1) && !uid_valid(keuid)) - return -EINVAL; - - if ((suid != (uid_t) -1) && !uid_valid(ksuid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -819,18 +812,24 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) int retval; kgid_t krgid, kegid, ksgid; -#if defined CONFIG_SEC_RESTRICT_SETUID - if(rgid == 0 || egid == 0 || sgid == 0) - { - if(sec_restrict_uid()) - return -EACCES; - } -#endif // End of CONFIG_SEC_RESTRICT_SETUID - krgid = make_kgid(ns, rgid); kegid = make_kgid(ns, egid); ksgid = make_kgid(ns, sgid); + if ((rgid != (gid_t) -1) && !gid_valid(krgid)) + return -EINVAL; + if ((egid != (gid_t) -1) && !gid_valid(kegid)) + return -EINVAL; + if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) + return -EINVAL; + +#if defined CONFIG_SEC_RESTRICT_SETUID + if (krgid.val == 0 || kegid.val == 0 || ksgid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; + } +#endif // End of CONFIG_SEC_RESTRICT_SETUID + #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!gid_is_LOD(krgid.val)) @@ -844,13 +843,6 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) } #endif - if ((rgid != (gid_t) -1) && !gid_valid(krgid)) - return -EINVAL; - if ((egid != (gid_t) -1) && !gid_valid(kegid)) - return -EINVAL; - if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) - return -EINVAL; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -923,6 +915,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) kuid = make_kuid(old->user_ns, uid); + if (!uid_valid(kuid)) + return old_fsuid; + +#if defined CONFIG_SEC_RESTRICT_SETUID + if (kuid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; + } +#endif // End of CONFIG_SEC_RESTRICT_SETUID + #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { if (!uid_is_LOD(kuid.val)) @@ -930,9 +932,6 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) } #endif - if (!uid_valid(kuid)) - return old_fsuid; - #ifdef CONFIG_SECURITY_DEFEX if (task_defex_enforce(current, NULL, -__NR_setfsuid)) return old_fsuid; @@ -974,6 +973,15 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) old_fsgid = from_kgid_munged(old->user_ns, old->fsgid); kgid = make_kgid(old->user_ns, gid); + if (!gid_valid(kgid)) + return old_fsgid; + +#if defined CONFIG_SEC_RESTRICT_SETUID + if (kgid.val == 0) { + if (sec_restrict_uid()) + return -EACCES; + } +#endif // End of CONFIG_SEC_RESTRICT_SETUID #ifdef CONFIG_LOD_SEC if (current_is_LOD()) { @@ -982,9 +990,6 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) } #endif - if (!gid_valid(kgid)) - return old_fsgid; - #ifdef CONFIG_SECURITY_DEFEX if (task_defex_enforce(current, NULL, -__NR_setfsgid)) return old_fsgid; diff --git a/mm/Kconfig b/mm/Kconfig index 047e82261ced..894cc58c4eb5 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -759,14 +759,3 @@ config BALANCE_ANON_FILE_RECLAIM equally. Swapping anonymous pages out to memory can be efficient enough to justify treating anonymous and file backed pages equally. - -config RBIN - bool "RBIN memory support" - default n - depends on CMA - help - This provides RBIN functionality in memory management. - RBIN memory is similar with CMA. 
Only the movable allocations with - __GFP_RBIN can be allocated in the RBIN memory. (In this way, - we can differentiate the pages going to CMA and RBIN.) Currently, - there is only one user of RBIN memory, ION RBIN heap. diff --git a/mm/cma.c b/mm/cma.c index 4f3d46653617..bd0e1412475e 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -42,7 +42,6 @@ struct cma cma_areas[MAX_CMA_AREAS]; unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); -static DEFINE_MUTEX(rbin_mutex); phys_addr_t cma_get_base(const struct cma *cma) { @@ -101,11 +100,6 @@ static int __init cma_activate_area(struct cma *cma) unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; unsigned i = cma->count >> pageblock_order; struct zone *zone; -#ifdef CONFIG_RBIN - bool is_rbin = cma->is_rbin; -#else - bool is_rbin = false; -#endif cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); @@ -130,7 +124,7 @@ static int __init cma_activate_area(struct cma *cma) if (page_zone(pfn_to_page(pfn)) != zone) goto err; } - init_cma_reserved_pageblock(pfn_to_page(base_pfn), is_rbin); + init_cma_reserved_pageblock(pfn_to_page(base_pfn)); } while (--i); mutex_init(&cma->lock); @@ -214,13 +208,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, return 0; } -#ifdef CONFIG_RBIN -void cma_set_rbin(struct cma *cma) -{ - cma->is_rbin = true; -} -#endif - /** * cma_declare_contiguous() - reserve custom contiguous area * @base: Base address of the reserved area optional, use 0 for any @@ -382,12 +369,6 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; struct page *page = NULL; -#ifdef CONFIG_RBIN - bool is_rbin = cma ? cma->is_rbin : false; - bool need_mutex = (align < (MAX_ORDER - 1)) ? true : false; -#else - bool is_rbin = false; -#endif int ret; if (!cma || !cma->count) @@ -422,21 +403,9 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) mutex_unlock(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); - if (!is_rbin) { - mutex_lock(&cma_mutex); - ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); - mutex_unlock(&cma_mutex); - } -#ifdef CONFIG_RBIN - else { - if (need_mutex) - mutex_lock(&rbin_mutex); - ret = alloc_contig_range_fast(pfn, pfn + count, - MIGRATE_RBIN); - if (need_mutex) - mutex_unlock(&rbin_mutex); - } -#endif + mutex_lock(&cma_mutex); + ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); + mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; diff --git a/mm/cma.h b/mm/cma.h index 758f066f9213..17c75a4246c8 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -2,9 +2,6 @@ #define __MM_CMA_H__ struct cma { -#ifdef CONFIG_RBIN - bool is_rbin; -#endif unsigned long base_pfn; unsigned long count; unsigned long *bitmap; diff --git a/mm/compaction.c b/mm/compaction.c index d9d1effd762f..a3d3e3638f23 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1037,9 +1037,6 @@ static void isolate_freepages(struct compact_control *cc) if (!isolation_suitable(cc, page)) continue; - if (is_migrate_rbin_page(page)) - continue; - /* Found a block suitable for isolating free pages from. */ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, freelist, false); @@ -1184,8 +1181,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, if (!isolation_suitable(cc, page)) continue; - if (is_migrate_rbin_page(page)) - continue; /* * For async compaction, also only scan in MOVABLE blocks. 
* Async compaction is optimistic to see if the minimum amount diff --git a/mm/filemap.c b/mm/filemap.c index adce2e3633b1..b7c9f02c5d98 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2108,8 +2108,6 @@ repeat: else goto next; } - if (is_migrate_rbin_page(page)) - goto next; if (!page_cache_get_speculative(page)) goto repeat; diff --git a/mm/internal.h b/mm/internal.h index ef11c41172a1..a5b442f0c3b8 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -448,9 +448,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ #define ALLOC_FAIR 0x100 /* fair zone allocation */ -#define ALLOC_RBIN 0x200 /* allow allocations from RBIN areas */ - -extern void test_and_set_mem_boost_timeout(void); enum ttu_flags; struct tlbflush_unmap_batch; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2885e21e61db..59844daa9751 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -114,9 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock); unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; unsigned long totalcma_pages __read_mostly; -unsigned long totalrbin_pages __read_mostly; -atomic_t rbin_allocated_pages = ATOMIC_INIT(0); -atomic_t rbin_pool_pages = ATOMIC_INIT(0); /* * When calculating the number of globally allowed dirty pages, there * is a certain number of per-zone reserves that should not be @@ -1317,7 +1314,7 @@ void __init page_alloc_init_late(void) #ifdef CONFIG_CMA /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ -void __init init_cma_reserved_pageblock(struct page *page, bool is_rbin) +void __init init_cma_reserved_pageblock(struct page *page) { unsigned i = pageblock_nr_pages; struct page *p = page; @@ -1327,7 +1324,7 @@ void __init init_cma_reserved_pageblock(struct page *page, bool is_rbin) set_page_count(p, 0); } while (++p, --i); - set_pageblock_migratetype(page, migratetype_rbin_or_cma(is_rbin)); + set_pageblock_migratetype(page, MIGRATE_CMA); if (pageblock_order >= MAX_ORDER) { i = pageblock_nr_pages; @@ -1506,9 +1503,6 @@ static int fallbacks[MIGRATE_TYPES][4] = { [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, #ifdef CONFIG_CMA [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ -#ifdef CONFIG_RBIN - [MIGRATE_RBIN] = { MIGRATE_TYPES }, /* Never used */ -#endif #endif #ifdef CONFIG_MEMORY_ISOLATION [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ @@ -1516,14 +1510,13 @@ static int fallbacks[MIGRATE_TYPES][4] = { }; #ifdef CONFIG_CMA -static struct page *__rmqueue_cma_rbin_fallback(struct zone *zone, - unsigned int order, bool is_rbin) +static struct page *__rmqueue_cma_fallback(struct zone *zone, + unsigned int order) { - return __rmqueue_smallest(zone, order, - migratetype_rbin_or_cma(is_rbin)); + return __rmqueue_smallest(zone, order, MIGRATE_CMA); } #else -static inline struct page *__rmqueue_cma_rbin_fallback(struct zone *zone, +static inline struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) { return NULL; } #endif @@ -1733,7 +1726,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, /* Yoink! 
*/ mt = get_pageblock_migratetype(page); if (mt != MIGRATE_HIGHATOMIC && - !is_migrate_isolate(mt) && !is_migrate_cma_rbin(mt)) { + !is_migrate_isolate(mt) && !is_migrate_cma(mt)) { zone->nr_reserved_highatomic += pageblock_nr_pages; set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); @@ -1873,15 +1866,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, { struct page *page = NULL; - if ((migratetype == MIGRATE_MOVABLE) && (gfp_flags & (__GFP_CMA | __GFP_RBIN))) { -#ifdef CONFIG_RBIN - if (gfp_flags & __GFP_RBIN) - test_and_set_mem_boost_timeout(); - if (!((gfp_flags & __GFP_RBIN) && atomic_read(&zone->rbin_alloc))) -#endif - page = __rmqueue_cma_rbin_fallback(zone, order, - ((gfp_flags & __GFP_RBIN) == __GFP_RBIN)); - } + if ((migratetype == MIGRATE_MOVABLE) && (gfp_flags & __GFP_CMA)) + page = __rmqueue_cma_fallback(zone, order); if (!page) page = __rmqueue_smallest(zone, order, migratetype); @@ -1927,11 +1913,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); -#ifdef CONFIG_RBIN - else if (is_migrate_rbin(get_pcppage_migratetype(page))) - __mod_zone_page_state(zone, NR_FREE_RBIN_PAGES, - -(1 << order)); -#endif } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock(&zone->lock); @@ -2240,7 +2221,7 @@ int __isolate_free_page(struct page *page, unsigned int order) struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { int mt = get_pageblock_migratetype(page); - if (!is_migrate_isolate(mt) && !is_migrate_cma_rbin(mt) + if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) && mt != MIGRATE_HIGHATOMIC) set_pageblock_migratetype(page, MIGRATE_MOVABLE); @@ -2289,11 +2270,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, unsigned long flags; struct page *page = NULL; -#ifdef CONFIG_RBIN - if (time_before(jiffies, INITIAL_JIFFIES + 20 * HZ)) - gfp_flags &= ~__GFP_RBIN; -#endif - if (likely(order == 0)) { struct per_cpu_pages *pcp; struct list_head *list; @@ -2321,12 +2297,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, * in the pcp list is probabily CMA pages because rmqueue_bulk() * fills the list from the free list of the same migratetype. 
*/ - if ((!(gfp_flags & __GFP_CMA) - && is_migrate_cma(get_pcppage_migratetype(page))) || - (!(gfp_flags & __GFP_RBIN) - && is_migrate_rbin_nolikely(get_pcppage_migratetype(page))) || - ((gfp_flags & __GFP_RBIN) - && !is_migrate_rbin_nolikely(get_pcppage_migratetype(page)))) { + if (!(gfp_flags & __GFP_CMA) && + is_migrate_cma(get_pcppage_migratetype(page))) { page = NULL; local_irq_restore(flags); } else { @@ -2494,10 +2466,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order, /* If allocation can't use CMA areas don't use free CMA pages */ if (!(alloc_flags & ALLOC_CMA)) free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); -#ifdef CONFIG_RBIN - if (!(alloc_flags & ALLOC_RBIN) || atomic_read(&z->rbin_alloc)) - free_pages -= zone_page_state(z, NR_FREE_RBIN_PAGES); -#endif #endif /* @@ -2533,13 +2501,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order, !list_empty(&area->free_list[MIGRATE_CMA])) { return true; } -#ifdef CONFIG_RBIN - else if ((alloc_flags & ALLOC_RBIN) && - !list_empty(&area->free_list[MIGRATE_RBIN])) { - return true; - } - -#endif #endif } return false; @@ -3077,15 +3038,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= ALLOC_NO_WATERMARKS; } #ifdef CONFIG_CMA -#ifdef CONFIG_RBIN - if ((gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) && - ((gfp_mask & __GFP_RBIN) == __GFP_RBIN)) - alloc_flags |= ALLOC_RBIN; - else -#endif - if ((gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) - && !!(gfp_mask & __GFP_CMA)) - alloc_flags |= ALLOC_CMA; + if ((gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + && !!(gfp_mask & __GFP_CMA)) + alloc_flags |= ALLOC_CMA; #endif return alloc_flags; } @@ -3344,15 +3299,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!zonelist->_zonerefs->zone)) return NULL; -#ifdef CONFIG_RBIN - if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE && - ((gfp_mask & __GFP_RBIN) == __GFP_RBIN)) - alloc_flags |= ALLOC_RBIN; - else -#endif - if (IS_ENABLED(CONFIG_CMA) && (ac.migratetype == MIGRATE_MOVABLE) - && !!(gfp_mask & __GFP_CMA)) - alloc_flags |= ALLOC_CMA; + if (IS_ENABLED(CONFIG_CMA) && (ac.migratetype == MIGRATE_MOVABLE) + && !!(gfp_mask & __GFP_CMA)) + alloc_flags |= ALLOC_CMA; retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); @@ -3803,9 +3752,6 @@ static void show_migration_types(unsigned char type) [MIGRATE_HIGHATOMIC] = 'H', #ifdef CONFIG_CMA [MIGRATE_CMA] = 'C', -#ifdef CONFIG_RBIN - [MIGRATE_RBIN] = 'R', -#endif #endif #ifdef CONFIG_MEMORY_ISOLATION [MIGRATE_ISOLATE] = 'I', @@ -3852,7 +3798,7 @@ void show_free_areas(unsigned int filter) " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" " slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" - " free:%lu free_pcp:%lu free_cma:%lu free_rbin:%lu\n", + " free:%lu free_pcp:%lu free_cma:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), global_page_state(NR_ISOLATED_ANON), @@ -3871,8 +3817,7 @@ void show_free_areas(unsigned int filter) global_page_state(NR_BOUNCE), global_page_state(NR_FREE_PAGES), free_pcp, - global_page_state(NR_FREE_CMA_PAGES), - global_page_state(NR_FREE_RBIN_PAGES)); + global_page_state(NR_FREE_CMA_PAGES)); for_each_populated_zone(zone) { int i; @@ -3913,7 +3858,6 @@ void show_free_areas(unsigned int filter) " free_pcp:%lukB" " local_pcp:%ukB" " free_cma:%lukB" - " free_rbin:%lukB" " writeback_tmp:%lukB" " pages_scanned:%lu" " all_unreclaimable? 
%s" @@ -3947,7 +3891,6 @@ void show_free_areas(unsigned int filter) K(free_pcp), K(this_cpu_read(zone->pageset->pcp.count)), K(zone_page_state(zone, NR_FREE_CMA_PAGES)), - K(zone_page_state(zone, NR_FREE_RBIN_PAGES)), K(zone_page_state(zone, NR_WRITEBACK_TEMP)), K(zone_page_state(zone, NR_PAGES_SCANNED)), (!zone_reclaimable(zone) ? "yes" : "no") @@ -6684,7 +6627,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, if (zone_idx(zone) == ZONE_MOVABLE) return false; mt = get_pageblock_migratetype(page); - if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt) || is_migrate_rbin(mt)) + if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) return false; pfn = page_to_pfn(page); @@ -6786,7 +6729,7 @@ static unsigned long pfn_max_align_up(unsigned long pfn) /* [start, end) must belong to a single zone. */ static int __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end, - bool cma, unsigned migratetype) + bool cma) { /* This function is based on compact_zone() from compaction.c. */ unsigned long nr_reclaimed; @@ -6810,10 +6753,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, ret = -EINTR; break; } - if (is_migrate_rbin_nolikely(migratetype)) - tries = 4; - else - tries = 0; + tries = 0; } else if (++tries == 5) { ret = ret < 0 ? ret : -EBUSY; break; @@ -6903,7 +6843,7 @@ int __alloc_contig_range(unsigned long start, unsigned long end, if (ret) return ret; - ret = __alloc_contig_migrate_range(&cc, start, end, cma, migratetype); + ret = __alloc_contig_migrate_range(&cc, start, end, cma); if (ret) goto done; diff --git a/mm/usercopy.c b/mm/usercopy.c index 4553b9de4637..b34996a3860b 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -142,7 +142,7 @@ static inline const char *check_page_span(const void *ptr, unsigned long n, #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN const void *end = ptr + n - 1; struct page *endpage; - bool is_reserved, is_cma_rbin; + bool is_reserved, is_cma; /* * Sometimes the kernel data regions are not marked Reserved (see @@ -183,15 +183,15 @@ static inline const char *check_page_span(const void *ptr, unsigned long n, * several independently allocated pages. 
*/ is_reserved = PageReserved(page); - is_cma_rbin = is_migrate_cma_rbin_page(page); - if (!is_reserved && !is_cma_rbin) + is_cma = is_migrate_cma_page(page); + if (!is_reserved && !is_cma) return ""; for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { page = virt_to_head_page(ptr); if (is_reserved && !PageReserved(page)) return ""; - if (is_cma_rbin && !is_migrate_cma_rbin_page(page)) + if (is_cma && !is_migrate_cma_page(page)) return ""; } #endif diff --git a/mm/vmscan.c b/mm/vmscan.c index 1b255694cc1b..9b21021d8cf6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -52,7 +52,6 @@ #include #include -#include #include "internal.h" @@ -1270,53 +1269,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, return ret; } -/* A caller should guarantee that start and end pfns are in the same zone */ -void reclaim_contig_migrate_range(unsigned long start, - unsigned long end, bool drain) -{ - /* This function is based on __alloc_contig_migrate_range */ - unsigned long nr_reclaimed; - unsigned long pfn = start; - struct compact_control cc = { - .mode = MIGRATE_SYNC_LIGHT, - }; - unsigned long total_reclaimed = 0; - - cc.nr_freepages = 0; - cc.nr_migratepages = 0; - cc.zone = page_zone(pfn_to_page(start)); - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); - - if (drain) - migrate_prep(); - - while (pfn < end) { - if (fatal_signal_pending(current)) { - pr_warn_once("%s %d got fatal signal\n", - __func__, __LINE__); - break; - } - - if (list_empty(&cc.migratepages)) { - cc.nr_migratepages = 0; - pfn = isolate_migratepages_range(&cc, pfn, end); - if (!pfn) - break; - } - - nr_reclaimed = reclaim_clean_pages_from_list(cc.zone, - &cc.migratepages); - cc.nr_migratepages -= nr_reclaimed; - total_reclaimed += nr_reclaimed; - - /* Skip pages not reclaimed in the above */ - if (cc.nr_migratepages) - putback_movable_pages(&cc.migratepages); - } - trace_printk("%lu\n", total_reclaimed << PAGE_SHIFT); -} - /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being @@ -1608,8 +1560,6 @@ static int current_may_throttle(void) bdi_write_congested(current->backing_dev_info); } -static inline bool need_memory_boosting(struct zone *zone); - /* * shrink_inactive_list() is a helper for shrink_zone(). 
It returns the number * of reclaimed pages @@ -1631,8 +1581,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, int file = is_file_lru(lru); struct zone *zone = lruvec_zone(lruvec); struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; - bool force_reclaim = false; - enum ttu_flags ttu = TTU_UNMAP; while (unlikely(too_many_isolated(zone, file, sc))) { congestion_wait(BLK_RW_ASYNC, HZ/10); @@ -1669,15 +1617,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, if (nr_taken == 0) return 0; - if (need_memory_boosting(zone)) { - force_reclaim = true; - ttu |= TTU_IGNORE_ACCESS; - } - - nr_reclaimed = shrink_page_list(&page_list, zone, sc, ttu, + nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, &nr_dirty, &nr_unqueued_dirty, &nr_congested, &nr_writeback, &nr_immediate, - force_reclaim); + false); spin_lock_irq(&zone->lru_lock); @@ -2016,153 +1959,6 @@ enum scan_balance { SCAN_FILE, }; -/* mem_boost throttles only kswapd's behavior */ -enum mem_boost { - NO_BOOST, - BOOST_MID = 1, - BOOST_HIGH = 2, -}; -static int mem_boost_mode = NO_BOOST; -static unsigned long last_mode_change; -static bool memory_boosting_disabled = false; - -#define MEM_BOOST_MAX_TIME (5 * HZ) /* 5 sec */ - -#ifdef CONFIG_SYSFS -enum rbin_alloc_policy { - RBIN_ALLOW = 0, - RBIN_DENY = 1, -}; - -#ifdef CONFIG_RBIN -static void set_rbin_alloc_policy(enum rbin_alloc_policy val) -{ - struct zone *zone; - - if (val == RBIN_ALLOW) - wake_ion_rbin_heap_shrink(); - for_each_populated_zone(zone) { - atomic_set(&zone->rbin_alloc, val); - if (val) - wakeup_kswapd(zone, 0, gfp_zone(GFP_KERNEL)); - } -} -#else -static inline void set_rbin_alloc_policy(enum rbin_alloc_policy val) {} -#endif - -void test_and_set_mem_boost_timeout(void) -{ - if ((mem_boost_mode != NO_BOOST) && - time_after(jiffies, last_mode_change + MEM_BOOST_MAX_TIME)) { - mem_boost_mode = NO_BOOST; - set_rbin_alloc_policy(RBIN_ALLOW); - } -} - -static ssize_t mem_boost_mode_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - test_and_set_mem_boost_timeout(); - return sprintf(buf, "%d\n", mem_boost_mode); -} - -static ssize_t mem_boost_mode_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int mode; - int err; - - err = kstrtoint(buf, 10, &mode); - if (err || mode > BOOST_HIGH || mode < NO_BOOST) - return -EINVAL; - - mem_boost_mode = mode; - trace_printk("memboost start\n"); - last_mode_change = jiffies; - if (mem_boost_mode == BOOST_HIGH) { -#ifdef CONFIG_ION_RBIN_HEAP - wake_ion_rbin_heap_prereclaim(); -#endif - set_rbin_alloc_policy(RBIN_DENY); - } - - return count; -} - -static ssize_t disable_mem_boost_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - int ret; - - ret = memory_boosting_disabled ? 1 : 0; - return sprintf(buf, "%d\n", ret); -} - -static ssize_t disable_mem_boost_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int mode; - int err; - - err = kstrtoint(buf, 10, &mode); - if (err || (mode != 0 && mode != 1)) - return -EINVAL; - - memory_boosting_disabled = mode ? 
true : false; - - return count; -} - -#define MEM_BOOST_ATTR(_name) \ - static struct kobj_attribute _name##_attr = \ - __ATTR(_name, 0644, _name##_show, _name##_store) -MEM_BOOST_ATTR(mem_boost_mode); -MEM_BOOST_ATTR(disable_mem_boost); - -static struct attribute *mem_boost_attrs[] = { - &mem_boost_mode_attr.attr, - &disable_mem_boost_attr.attr, - NULL, -}; - -static struct attribute_group mem_boost_attr_group = { - .attrs = mem_boost_attrs, - .name = "vmscan", -}; -#endif - -static inline bool mem_boost_pgdat_wmark(struct zone *zone) -{ - return zone_watermark_ok_safe(zone, 0, low_wmark_pages(zone), 0); //TODO: low, high, or (low + high)/2 -} - -static inline bool need_memory_boosting(struct zone *zone) -{ - bool ret; - - test_and_set_mem_boost_timeout(); - - if (memory_boosting_disabled) - return false; - - switch (mem_boost_mode) { - case BOOST_HIGH: - ret = true; - break; - case BOOST_MID: - ret = mem_boost_pgdat_wmark(zone) ? false : true; - break; - case NO_BOOST: - default: - ret = false; - break; - } - return ret; -} - /* * Determine how aggressively the anon and file LRU lists should be * scanned. The relative value of each set of LRU lists is determined @@ -2259,11 +2055,6 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness, } } - if (current_is_kswapd() && need_memory_boosting(zone)) { - scan_balance = SCAN_FILE; - goto out; - } - /* * There is enough inactive page cache, do not reclaim * anything from the anonymous working set right now. @@ -3061,6 +2852,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); return nr_reclaimed; @@ -3169,14 +2961,10 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc) } while (memcg); } -#define MEM_BOOST_WMARK_SCALE_FACTOR 1 static bool zone_balanced(struct zone *zone, int order, bool highorder, unsigned long balance_gap, int classzone_idx) { - unsigned long mark = high_wmark_pages(zone); - - if (current_is_kswapd() && need_memory_boosting(zone)) - mark *= MEM_BOOST_WMARK_SCALE_FACTOR; + unsigned long mark = high_wmark_pages(zone) + balance_gap; /* * When checking from pgdat_balanced(), kswapd should stop and sleep @@ -3189,7 +2977,7 @@ static bool zone_balanced(struct zone *zone, int order, bool highorder, order = 0; } - return zone_watermark_ok_safe(zone, order, mark + balance_gap, classzone_idx); + return zone_watermark_ok_safe(zone, order, mark, classzone_idx); } /* @@ -3694,14 +3482,9 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) } if (!waitqueue_active(&pgdat->kswapd_wait)) return; - - if (need_memory_boosting(zone)) - goto wakeup; - if (zone_balanced(zone, order, true, 0, 0)) return; -wakeup: trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); wake_up_interruptible(&pgdat->kswapd_wait); } @@ -3816,10 +3599,6 @@ static int __init kswapd_init(void) for_each_node_state(nid, N_MEMORY) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); -#ifdef CONFIG_SYSFS - if (sysfs_create_group(mm_kobj, &mem_boost_attr_group)) - pr_err("vmscan: register mem boost sysfs failed\n"); -#endif return 0; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 0c705fec4a48..48aa6b0681aa 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -764,7 +764,6 @@ const char * const vmstat_text[] = { "workingset_nodereclaim", "nr_anon_transparent_hugepages", "nr_free_cma", - "nr_free_rbin", /* enum writeback_stat_item counters */ "nr_dirty_threshold", @@ -931,9 
+930,6 @@ static char * const migratetype_names[MIGRATE_TYPES] = { "HighAtomic", #ifdef CONFIG_CMA "CMA", -#ifdef CONFIG_RBIN - "RBIN", -#endif #endif #ifdef CONFIG_MEMORY_ISOLATION "Isolate", diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 9511af04dc81..15495b956855 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -1225,6 +1225,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { { .name = "Q.931", .me = THIS_MODULE, + .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_INET6, .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), .tuple.dst.protonum = IPPROTO_TCP, -- 2.20.1
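The kernel/ptrace.c hunk above replaces a signed 32-bit cursor ("s32 off = arg.off + i", with a trailing "off >= 0" test) by an unsigned long cursor plus an explicit found flag, so a huge arg.off can no longer wrap the cursor and falsely report an in-queue hit. The following is a minimal userspace sketch of that same walk-with-found-flag pattern; struct node, fill_list() and peek_at() are hypothetical stand-ins for illustration, not kernel code or part of this patch.

/*
 * Illustrative only: mirrors the cursor pattern from the
 * ptrace_peek_siginfo() hunk. An unsigned cursor cannot go
 * negative, so "did we land on an element?" is tracked with
 * an explicit flag instead of a post-loop sign test.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static struct node *fill_list(int n)
{
	struct node *head = NULL;

	while (n--) {
		struct node *q = malloc(sizeof(*q));

		q->val = n;
		q->next = head;
		head = q;
	}
	return head;
}

/* Returns true and stores the element when 'off' is within the list. */
static bool peek_at(struct node *head, unsigned long off, int *out)
{
	bool found = false;
	struct node *q;

	for (q = head; q; q = q->next) {
		if (!off--) {	/* same decrement-and-test walk as the patched loop */
			found = true;
			*out = q->val;
			break;
		}
	}
	return found;	/* no signed "off >= 0" check that could wrap */
}

int main(void)
{
	struct node *head = fill_list(3);
	int v = 0;

	/* An offset inside the list is found; one beyond it is not. */
	printf("off 1:  %s\n", peek_at(head, 1, &v) ? "found" : "absent");
	printf("off 10: %s\n", peek_at(head, 10, &v) ? "found" : "absent");
	return 0;
}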