From: Ingo Molnar
Date: Mon, 24 Nov 2008 16:44:55 +0000 (+0100)
Subject: Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal...
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=b19b3c74c7bbec45a848631b8f970ac110665a01;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal', 'core/urgent' and 'core/xen' into core/core
---

b19b3c74c7bbec45a848631b8f970ac110665a01

diff --cc arch/x86/include/asm/uaccess_64.h
index f8cfd00db450,000000000000,000000000000,543ba883cc66,664f15280f14,664f15280f14,f8cfd00db450,000000000000..84210c479fca
mode 100644,000000,000000,100644,100644,100644,100644,000000..100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@@@@@@@@ -1,202 -1,0 -1,0 -1,208 -1,202 -1,202 -1,202 -1,0 +1,208 @@@@@@@@@
++ +#ifndef _ASM_X86_UACCESS_64_H
++ +#define _ASM_X86_UACCESS_64_H
++ +
++ +/*
++ + * User space memory access functions
++ + */
++ +#include
++ +#include
++ +#include
++ +#include
++ +#include
++ +
++ +/*
++ + * Copy To/From Userspace
++ + */
++ +
++ +/* Handles exceptions in both to and from, but doesn't do access_ok */
++ +__must_check unsigned long
++ +copy_user_generic(void *to, const void *from, unsigned len);
++ +
++ +__must_check unsigned long
++ +copy_to_user(void __user *to, const void *from, unsigned len);
++ +__must_check unsigned long
++ +copy_from_user(void *to, const void __user *from, unsigned len);
++ +__must_check unsigned long
++ +copy_in_user(void __user *to, const void __user *from, unsigned len);
++ +
++ +static __always_inline __must_check
++ +int __copy_from_user(void *dst, const void __user *src, unsigned size)
++ +{
++ +    int ret = 0;
+++ ++++
+++ ++++    might_fault();
++ +    if (!__builtin_constant_p(size))
++ +        return copy_user_generic(dst, (__force void *)src, size);
++ +    switch (size) {
++ +    case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ +                  ret, "b", "b", "=q", 1);
++ +        return ret;
++ +    case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ +                  ret, "w", "w", "=r", 2);
++ +        return ret;
++ +    case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ +                  ret, "l", "k", "=r", 4);
++ +        return ret;
++ +    case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ +                  ret, "q", "", "=r", 8);
++ +        return ret;
++ +    case 10:
++ +        __get_user_asm(*(u64 *)dst, (u64 __user *)src,
---                    ret, "q", "", "=r", 16);
+++++ +                ret, "q", "", "=r", 10);
++ +        if (unlikely(ret))
++ +            return ret;
++ +        __get_user_asm(*(u16 *)(8 + (char *)dst),
++ +                   (u16 __user *)(8 + (char __user *)src),
++ +                   ret, "w", "w", "=r", 2);
++ +        return ret;
++ +    case 16:
++ +        __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ +                   ret, "q", "", "=r", 16);
++ +        if (unlikely(ret))
++ +            return ret;
++ +        __get_user_asm(*(u64 *)(8 + (char *)dst),
++ +                   (u64 __user *)(8 + (char __user *)src),
++ +                   ret, "q", "", "=r", 8);
++ +        return ret;
++ +    default:
++ +        return copy_user_generic(dst, (__force void *)src, size);
++ +    }
++ +}
++ +
++ +static __always_inline __must_check
++ +int __copy_to_user(void __user *dst, const void *src, unsigned size)
++ +{
++ +    int ret = 0;
+++ ++++
+++ ++++    might_fault();
++ +    if (!__builtin_constant_p(size))
++ +        return copy_user_generic((__force void *)dst, src, size);
++ +    switch (size) {
++ +    case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ +                  ret, "b", "b", "iq", 1);
++ +        return ret;
++ +    case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ +                  ret, "w", "w", "ir", 2);
++ +        return ret;
++ +    case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ +                  ret, "l", "k", "ir", 4);
++ +        return ret;
++ +    case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ +                  ret, "q", "", "ir", 8);
++ +        return ret;
++ +    case 10:
++ +        __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ +                   ret, "q", "", "ir", 10);
++ +        if (unlikely(ret))
++ +            return ret;
++ +        asm("":::"memory");
++ +        __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ +                   ret, "w", "w", "ir", 2);
++ +        return ret;
++ +    case 16:
++ +        __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ +                   ret, "q", "", "ir", 16);
++ +        if (unlikely(ret))
++ +            return ret;
++ +        asm("":::"memory");
++ +        __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ +                   ret, "q", "", "ir", 8);
++ +        return ret;
++ +    default:
++ +        return copy_user_generic((__force void *)dst, src, size);
++ +    }
++ +}
++ +
++ +static __always_inline __must_check
++ +int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++ +{
++ +    int ret = 0;
+++ ++++
+++ ++++    might_fault();
++ +    if (!__builtin_constant_p(size))
++ +        return copy_user_generic((__force void *)dst,
++ +                     (__force void *)src, size);
++ +    switch (size) {
++ +    case 1: {
++ +        u8 tmp;
++ +        __get_user_asm(tmp, (u8 __user *)src,
++ +                   ret, "b", "b", "=q", 1);
++ +        if (likely(!ret))
++ +            __put_user_asm(tmp, (u8 __user *)dst,
++ +                       ret, "b", "b", "iq", 1);
++ +        return ret;
++ +    }
++ +    case 2: {
++ +        u16 tmp;
++ +        __get_user_asm(tmp, (u16 __user *)src,
++ +                   ret, "w", "w", "=r", 2);
++ +        if (likely(!ret))
++ +            __put_user_asm(tmp, (u16 __user *)dst,
++ +                       ret, "w", "w", "ir", 2);
++ +        return ret;
++ +    }
++ +
++ +    case 4: {
++ +        u32 tmp;
++ +        __get_user_asm(tmp, (u32 __user *)src,
++ +                   ret, "l", "k", "=r", 4);
++ +        if (likely(!ret))
++ +            __put_user_asm(tmp, (u32 __user *)dst,
++ +                       ret, "l", "k", "ir", 4);
++ +        return ret;
++ +    }
++ +    case 8: {
++ +        u64 tmp;
++ +        __get_user_asm(tmp, (u64 __user *)src,
++ +                   ret, "q", "", "=r", 8);
++ +        if (likely(!ret))
++ +            __put_user_asm(tmp, (u64 __user *)dst,
++ +                       ret, "q", "", "ir", 8);
++ +        return ret;
++ +    }
++ +    default:
++ +        return copy_user_generic((__force void *)dst,
++ +                     (__force void *)src, size);
++ +    }
++ +}
++ +
++ +__must_check long
++ +strncpy_from_user(char *dst, const char __user *src, long count);
++ +__must_check long
++ +__strncpy_from_user(char *dst, const char __user *src, long count);
++ +__must_check long strnlen_user(const char __user *str, long n);
++ +__must_check long __strnlen_user(const char __user *str, long n);
++ +__must_check long strlen_user(const char __user *str);
++ +__must_check unsigned long clear_user(void __user *mem, unsigned long len);
++ +__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
++ +
++ +__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
++ +                         unsigned size);
++ +
++ +static __must_check __always_inline int
++ +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++ +{
++ +    return copy_user_generic((__force void *)dst, src, size);
++ +}
++ +
++ +extern long __copy_user_nocache(void *dst, const void __user *src,
++ +                unsigned size, int zerorest);
++ +
++ +static inline int __copy_from_user_nocache(void *dst, const void __user *src,
++ +                       unsigned size)
++ +{
++ +    might_sleep();
++ +    return __copy_user_nocache(dst, src, size, 1);
++ +}
++ +
++ +static inline int __copy_from_user_inatomic_nocache(void *dst,
++ +                            const void __user *src,
++ +                            unsigned size)
++ +{
++ +    return __copy_user_nocache(dst, src, size, 0);
++ +}
++ +
++ +unsigned long
++ +copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++ +
++ +#endif /* _ASM_X86_UACCESS_64_H */
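
A note on the fixed-size fast paths above: an odd-sized copy such as the 10-byte case is done as one 8-byte access followed by one 2-byte access, and the merge also corrects the size operand of the first leg from 16 to 10. The plain C sketch below (ordinary userspace code; the helper name is invented and there is none of the real header's __user annotation or exception handling) only illustrates that split-copy pattern, returning 0 where the kernel helpers would report the number of bytes left uncopied.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: move a 10-byte object as an 8-byte chunk plus a
 * 2-byte chunk, mirroring the shape of the case 10 fast path above. */
static int copy_10_bytes(void *dst, const void *src)
{
    uint64_t lo;
    uint16_t hi;

    memcpy(&lo, src, sizeof(lo));                    /* quadword leg ("q") */
    memcpy(&hi, (const char *)src + 8, sizeof(hi));  /* word leg ("w")     */
    memcpy(dst, &lo, sizeof(lo));
    memcpy((char *)dst + 8, &hi, sizeof(hi));
    return 0;   /* the kernel helpers return bytes not copied instead */
}

int main(void)
{
    char src[10] = "123456789";
    char dst[10] = { 0 };

    copy_10_bytes(dst, src);
    printf("%.10s\n", dst);
    return 0;
}
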
diff --cc include/linux/kernel.h
index dc7e0d0a6474,3f30557be2a3,2651f805ba6d,69a9bfdf9c86,fba141d3ca07,fba141d3ca07,dc7e0d0a6474,94d17ff64c5a..269df5a17b30
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@@@@@@@@ -318,36 -290,28 -288,28 -327,32 -318,32 -318,32 -318,36 -294,32 +329,36 @@@@@@@@@ static inline char *pack_hex_byte(char
     return buf;
 }
 
----- -#define pr_emerg(fmt, arg...) \
----- -    printk(KERN_EMERG fmt, ##arg)
----- -#define pr_alert(fmt, arg...) \
----- -    printk(KERN_ALERT fmt, ##arg)
----- -#define pr_crit(fmt, arg...) \
----- -    printk(KERN_CRIT fmt, ##arg)
----- -#define pr_err(fmt, arg...) \
----- -    printk(KERN_ERR fmt, ##arg)
----- -#define pr_warning(fmt, arg...) \
----- -    printk(KERN_WARNING fmt, ##arg)
----- -#define pr_notice(fmt, arg...) \
----- -    printk(KERN_NOTICE fmt, ##arg)
----- -#define pr_info(fmt, arg...) \
----- -    printk(KERN_INFO fmt, ##arg)
--
-- #ifdef DEBUG
+++++ +#ifndef pr_fmt
+++++ +#define pr_fmt(fmt) fmt
+++++ +#endif
+++++ +
+++++ +#define pr_emerg(fmt, ...) \
+++++ +    printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_alert(fmt, ...) \
+++++ +    printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_crit(fmt, ...) \
+++++ +    printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_err(fmt, ...) \
+++++ +    printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_warning(fmt, ...) \
+++++ +    printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_notice(fmt, ...) \
+++++ +    printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+++++ +#define pr_info(fmt, ...) \
+++++ +    printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
++
++ /* If you are writing a driver, please use dev_dbg instead */
-- #define pr_debug(fmt, arg...) \
--     printk(KERN_DEBUG fmt, ##arg)
++ #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
++ #define pr_debug(fmt, ...) do { \
--- -    dynamic_pr_debug(fmt, ##__VA_ARGS__); \
+++++ +    dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
++     } while (0)
++ #elif defined(DEBUG)
--- -#define pr_debug(fmt, arg...) \
--- -    printk(KERN_DEBUG fmt, ##arg)
+++++ +#define pr_debug(fmt, ...) \
+++++ +    printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
----- -#define pr_debug(fmt, arg...) \
----- -    ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; })
+++++ +#define pr_debug(fmt, ...) \
+++++ +    ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
 #endif
 
 /*
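
The pr_fmt() hook added above lets a compilation unit prepend its own prefix to every pr_*() message by defining pr_fmt() before the first include; otherwise the default pass-through definition is used. A minimal module-style sketch of that usage (the "mydrv: " prefix and function names are invented for illustration):

/*
 * Sketch: override pr_fmt() before including kernel headers so that
 * every pr_*() call in this file is prefixed automatically.
 */
#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init mydrv_init(void)
{
    pr_info("loaded\n");    /* expands to printk(KERN_INFO "mydrv: loaded\n") */
    return 0;
}

static void __exit mydrv_exit(void)
{
    pr_info("unloaded\n");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
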
diff --cc kernel/exit.c
index 2d8be7ebb0f7,16395644a98f,85a83c831856,ae2b92be5fae,80137a5d9467,b9c4d8bb72e5,2d8be7ebb0f7,80137a5d9467..30fcdf16737a
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@@@@@@@@ -1316,23 -1309,20 -1317,20 -1325,23 -1320,23 -1325,23 -1316,23 -1320,23 +1316,23 @@@@@@@@@ static int wait_task_zombie(struct task
          * need to protect the access to p->parent->signal fields,
          * as other threads in the parent group can be right
          * here reaping other children at the same time.
++          *
++          * We use thread_group_cputime() to get times for the thread
++          * group, which consolidates times for all threads in the
++          * group including the group leader.
          */
+++++ ++        thread_group_cputime(p, &cputime);
         spin_lock_irq(&p->parent->sighand->siglock);
         psig = p->parent->signal;
         sig = p->signal;
- -- --         thread_group_cputime(p, &cputime);
         psig->cutime =
             cputime_add(psig->cutime,
--             cputime_add(p->utime,
--             cputime_add(sig->utime,
--                     sig->cutime)));
++             cputime_add(cputime.utime,
++                     sig->cutime));
         psig->cstime =
             cputime_add(psig->cstime,
--             cputime_add(p->stime,
--             cputime_add(sig->stime,
--                     sig->cstime)));
++             cputime_add(cputime.stime,
++                     sig->cstime));
         psig->cgtime =
             cputime_add(psig->cgtime,
             cputime_add(p->gtime,
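
The comment above explains the switch to thread_group_cputime(): the parent now charges one consolidated total for the whole thread group plus the already-reaped children, instead of adding the group leader's own time to the signal-struct sums. A rough userspace analogue with simplified, made-up types (not the kernel's cputime machinery):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; names are illustrative. */
struct thread_times { unsigned long long utime, stime; };

struct task_times {
    struct thread_times per_thread[4];  /* pretend group of up to 4 threads */
    int nr_threads;
    struct thread_times children;       /* totals of already-reaped children */
};

/* Rough analogue of thread_group_cputime(): one total for the whole group,
 * leader included, so the caller no longer sums leader + siblings itself. */
static struct thread_times group_cputime(const struct task_times *t)
{
    struct thread_times sum = { 0, 0 };
    int i;

    for (i = 0; i < t->nr_threads; i++) {
        sum.utime += t->per_thread[i].utime;
        sum.stime += t->per_thread[i].stime;
    }
    return sum;
}

int main(void)
{
    struct task_times t = {
        .per_thread = { {10, 1}, {20, 2}, {5, 3} },
        .nr_threads = 3,
        .children   = { 100, 50 },
    };
    struct thread_times g = group_cputime(&t);

    /* what the parent is charged: group total plus child totals */
    printf("cutime=%llu cstime=%llu\n",
           g.utime + t.children.utime, g.stime + t.children.stime);
    return 0;
}
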
diff --cc kernel/futex.c
index 8af10027514b,7d1136e97c14,62cbd648e28a,8af10027514b,8af10027514b,8af10027514b,8af10027514b,7d1136e97c14..e10c5c8786a6
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@@@@@@@@ -229,79 -229,79 -248,29 -229,79 -229,79 -229,79 -229,79 -229,79 +248,29 @@@@@@@@@ again
      *
      * NOTE: When userspace waits on a MAP_SHARED mapping, even if
      * it's a read-only handle, it's expected that futexes attach to
-- -----  * the object not the particular process. Therefore we use
-- -----  * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-- -----  * mappings of _writable_ handles.
++ +++++  * the object not the particular process.
      */
-- -----     if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-- -----         key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
++ +++++     if (PageAnon(page)) {
++ +++++         key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
         key->private.mm = mm;
         key->private.address = address;
- -         return 0;
- -     }
- -
- -     /*
- -      * Linear file mappings are also simple.
- -      */
- -     key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
- -     key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
- -     if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- -         key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
- -                      + vma->vm_pgoff);
-- -----         return 0;
++ +++++     } else {
++ +++++         key->both.offset |= FUT_OFF_INODE; /* inode-based key */
++ +++++         key->shared.inode = page->mapping->host;
++ +++++         key->shared.pgoff = page->index;
     }
-- -----     /*
- ----      * Linear file mappings are also simple.
- -          * We could walk the page table to read the non-linear
- -          * pte, and get the page index without fetching the page
- -          * from swap. But that's a lot of code to duplicate here
- -          * for a rare case, so we simply fetch the page.
-- -----      */
- ----     key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
- ----     key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
- ----     if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- ----         key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
- ----                      + vma->vm_pgoff);
- -         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
- -         if (err >= 0) {
- -             key->shared.pgoff =
- -                 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- -             put_page(page);
-- -----         return 0;
-- -----     }
- -         return err;
- -}
++ +++++     get_futex_key_refs(key);
- ----     /*
- ----      * We could walk the page table to read the non-linear
- ----      * pte, and get the page index without fetching the page
- ----      * from swap. But that's a lot of code to duplicate here
- ----      * for a rare case, so we simply fetch the page.
- ----      */
- ----     err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
- ----     if (err >= 0) {
- ----         key->shared.pgoff =
- ----             page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- ----         put_page(page);
- ----         return 0;
- ----     }
- ----     return err;
- ---- }
- ----
-- -----/*
-- ----- * Take a reference to the resource addressed by a key.
-- ----- * Can be called while holding spinlocks.
-- ----- *
-- ----- */
-- -----static void get_futex_key_refs(union futex_key *key)
-- -----{
-- -----     if (key->both.ptr == NULL)
-- -----         return;
-- -----     switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----     case FUT_OFF_INODE:
-- -----         atomic_inc(&key->shared.inode->i_count);
-- -----         break;
-- -----     case FUT_OFF_MMSHARED:
-- -----         atomic_inc(&key->private.mm->mm_count);
-- -----         break;
-- -----     }
++ +++++     unlock_page(page);
++ +++++     put_page(page);
++ +++++     return 0;
 }
 
-- -----/*
-- ----- * Drop a reference to the resource addressed by a key.
-- ----- * The hash bucket spinlock must not be held.
-- ----- */
-- -----static void drop_futex_key_refs(union futex_key *key)
++ +++++static inline
++ +++++void put_futex_key(int fshared, union futex_key *key)
 {
-- -----     if (!key->both.ptr)
-- -----         return;
-- -----     switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----     case FUT_OFF_INODE:
-- -----         iput(key->shared.inode);
-- -----         break;
-- -----     case FUT_OFF_MMSHARED:
-- -----         mmdrop(key->private.mm);
-- -----         break;
-- -----     }
++ +++++     drop_futex_key_refs(key);
 }
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
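
The rewritten key setup above derives the futex key from the page itself: anonymous memory keys on the mm and address, file-backed memory keys on the inode and page index. A simplified userspace sketch of that two-way decision (the types and names here are stand-ins, not the kernel's union futex_key or struct page):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct fake_page { int anon; void *inode; unsigned long index; };

struct fake_key {
    int type;            /* 1 = private to an mm, 2 = inode based */
    void *obj;           /* mm or inode */
    unsigned long off;   /* faulting address or page index */
};

#define KEY_MMSHARED 1
#define KEY_INODE    2

/* Mirrors the branch added above: anonymous -> key on the mm,
 * file backed -> key on (inode, page index). Purely illustrative. */
static void make_key(struct fake_key *key, const struct fake_page *page,
                     void *mm, unsigned long address)
{
    if (page->anon) {
        key->type = KEY_MMSHARED;
        key->obj  = mm;
        key->off  = address;
    } else {
        key->type = KEY_INODE;
        key->obj  = page->inode;
        key->off  = page->index;
    }
}

int main(void)
{
    struct fake_page anon_pg = { 1, NULL, 0 };
    struct fake_page file_pg = { 0, (void *)0x1234, 42 };
    struct fake_key k;
    int dummy_mm;

    make_key(&k, &anon_pg, &dummy_mm, 0xdeadbeef);
    printf("anon: type=%d off=%#lx\n", k.type, k.off);
    make_key(&k, &file_pg, &dummy_mm, 0xdeadbeef);
    printf("file: type=%d index=%lu\n", k.type, k.off);
    return 0;
}
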
diff --cc kernel/sched.c
index 9b1e79371c20,cc1f81b50b82,13dd2db9fb2d,2a106b6b78b0,e8819bc6f462,b388c9b243e9,9b1e79371c20,d906f72b42d2..558e5f284269
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@@@@@@@@ -1433,37 -1425,9 -1425,9 -1433,35 -1425,35 -1433,37 -1433,37 -1419,35 +1433,37 @@@@@@@@@ up
         parent = parent->parent;
     if (parent)
         goto up;
++ out_unlock:
     rcu_read_unlock();
++
++     return ret;
++ }
++
++ static int tg_nop(struct task_group *tg, void *data)
++ {
++     return 0;
++ }
++ #endif
++
++ #ifdef CONFIG_SMP
++ static unsigned long source_load(int cpu, int type);
++ static unsigned long target_load(int cpu, int type);
++ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
++
++ static unsigned long cpu_avg_load_per_task(int cpu)
++ {
++     struct rq *rq = cpu_rq(cpu);
++
++     if (rq->nr_running)
++         rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++++ +    else
++++ +        rq->avg_load_per_task = 0;
++
++     return rq->avg_load_per_task;
 }
++ #ifdef CONFIG_FAIR_GROUP_SCHED
++ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
 /*
@@@@@@@@@ -1547,10 -1507,14 -1507,14 -1545,10 -1537,10 -1547,10 -1547,10 -1527,16 +1547,10 @@@@@@@@@ static int tg_shares_up(struct task_gro
     if (!rq_weight)
         rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-- -    for_each_cpu_mask(i, sd->span) {
-- -        struct rq *rq = cpu_rq(i);
-- -        unsigned long flags;
- -
-         spin_lock_irqsave(&rq->lock, flags);
-         __update_group_shares_cpu(tg, i, shares, rq_weight);
-         spin_unlock_irqrestore(&rq->lock, flags);
-     }
++ +    for_each_cpu_mask(i, sd->span)
++ +        update_group_shares_cpu(tg, i, shares, rq_weight);
--
--         spin_lock_irqsave(&rq->lock, flags);
--         __update_group_shares_cpu(tg, i, shares, rq_weight);
--         spin_unlock_irqrestore(&rq->lock, flags);
--     }
++
     return 0;
 }
 
 /*
@@@@@@@@@ -9025,25 -8905,16 -8905,19 -9021,25 -9008,25 -9023,25 -9025,25 -9008,25 +9024,25 @@@@@@@@@ long sched_group_rt_period(struct task_
 static int sched_rt_global_constraints(void)
 {
--     struct task_group *tg = &root_task_group;
--     u64 rt_runtime, rt_period;
++     u64 runtime, period;
     int ret = 0;
 
-     rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
-     rt_runtime = tg->rt_bandwidth.rt_runtime;
+     if (sysctl_sched_rt_period <= 0)
+         return -EINVAL;
+
-     rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
-     rt_runtime = tg->rt_bandwidth.rt_runtime;
++     runtime = global_rt_runtime();
++     period = global_rt_period();
++
++     /*
++      * Sanity check on the sysctl variables.
++      */
++     if (runtime > period && runtime != RUNTIME_INF)
++         return -EINVAL;
 
     mutex_lock(&rt_constraints_mutex);
--     if (!__rt_schedulable(tg, rt_period, rt_runtime))
--         ret = -EINVAL;
++     read_lock(&tasklist_lock);
++     ret = __rt_schedulable(NULL, 0, 0);
++     read_unlock(&tasklist_lock);
     mutex_unlock(&rt_constraints_mutex);
 
     return ret;
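
The new sched_rt_global_constraints() begins by sanity-checking the global sysctl pair before any locks are taken: the period must be positive, and the runtime slice may not exceed the period unless it is unlimited. A standalone sketch of just that validation (names simplified; RUNTIME_INF mirrors the kernel's "no limit" sentinel, and the scheduler bookkeeping is omitted):

#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)   /* "no limit" sentinel, as in the kernel */

/*
 * Return 0 if the (period, runtime) pair is acceptable, -1 otherwise,
 * following the checks added above: positive period, and runtime no
 * larger than the period unless it is unlimited.
 */
static int rt_globals_ok(long long period_us, long long runtime_us)
{
    uint64_t period, runtime;

    if (period_us <= 0)
        return -1;      /* corresponds to the first -EINVAL path */

    period  = (uint64_t)period_us;
    runtime = runtime_us < 0 ? RUNTIME_INF : (uint64_t)runtime_us;

    if (runtime > period && runtime != RUNTIME_INF)
        return -1;      /* corresponds to the second -EINVAL path */

    return 0;
}

int main(void)
{
    printf("%d\n", rt_globals_ok(1000000, 950000));    /*  0: slice fits     */
    printf("%d\n", rt_globals_ok(1000000, -1));        /*  0: no cap         */
    printf("%d\n", rt_globals_ok(1000000, 2000000));   /* -1: slice > period */
    return 0;
}
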