From: Stricted
Date: Wed, 21 Mar 2018 21:39:46 +0000 (+0100)
Subject: Merge tag 'v3.10.69' into update
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=4a2455f795a889c7c76c55abe4ce50bdd5175e5f;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git

Merge tag 'v3.10.69' into update

This is the 3.10.69 stable release
---

4a2455f795a889c7c76c55abe4ce50bdd5175e5f
diff --cc arch/arm64/include/asm/cputype.h
index 3c1f29592c34,be9b5ca9a6c0..65531f1cc8f3
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@@ -82,9 -71,11 +82,11 @@@ static inline unsigned int __attribute_
  
  static inline u32 __attribute_const__ read_cpuid_cachetype(void)
  {
 -	return read_cpuid(ID_CTR_EL0);
 +	return read_cpuid(CTR_EL0);
  }
  
+ void cpuinfo_store_cpu(void);
+ 
  #endif /* __ASSEMBLY__ */
  
  #endif
diff --cc arch/arm64/kernel/setup.c
index 268f504664fb,7cc551d1b0e1..21ad76b0495f
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@@ -109,95 -98,28 +110,108 @@@ void __init early_print(const char *str
  	printk("%s", buf);
  }
  
- void __init smp_setup_processor_id(void)
+ struct cpuinfo_arm64 {
+ 	struct cpu	cpu;
+ 	u32		reg_midr;
+ };
+ 
+ static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+ 
+ void cpuinfo_store_cpu(void)
+ {
+ 	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+ 	info->reg_midr = read_cpuid_id();
+ }
+ 
+ static void __init setup_processor(void)
  {
- 	struct cpu_info *cpu_info;
 +	/*
 +	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
 +	 * using percpu variable early, for example, lockdep will
 +	 * access percpu variable inside lock_release
 +	 */
 +	set_my_cpu_offset(0);
 +}
 +
 +bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 +{
 +	return phys_id == cpu_logical_map(cpu);
 +}
 +struct mpidr_hash mpidr_hash;
 +#ifdef CONFIG_SMP
 +/**
 + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 + *			  level in order to build a linear index from an
 + *			  MPIDR value. Resulting algorithm is a collision
 + *			  free hash carried out through shifting and ORing
 + */
 +static void __init smp_build_mpidr_hash(void)
 +{
 +	u32 i, affinity, fs[4], bits[4], ls;
 +	u64 mask = 0;
 +	/*
 +	 * Pre-scan the list of MPIDRS and filter out bits that do
 +	 * not contribute to affinity levels, ie they never toggle.
 +	 */
 +	for_each_possible_cpu(i)
 +		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
 +	pr_debug("mask of set bits %#llx\n", mask);
 +	/*
 +	 * Find and stash the last and first bit set at all affinity levels to
 +	 * check how many bits are required to represent them.
 +	 */
 +	for (i = 0; i < 4; i++) {
 +		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
 +		/*
 +		 * Find the MSB bit and LSB bits position
 +		 * to determine how many bits are required
 +		 * to express the affinity level.
 +		 */
 +		ls = fls(affinity);
 +		fs[i] = affinity ? ffs(affinity) - 1 : 0;
 +		bits[i] = ls - fs[i];
 +	}
  	/*
- 	 * locate processor in the list of supported processor
- 	 * types. The linker builds this table for us from the
- 	 * entries in arch/arm/mm/proc.S
+ 	 * An index can be created from the MPIDR_EL1 by isolating the
+ 	 * significant bits at each affinity level and by shifting
+ 	 * them in order to compress the 32 bits values space to a
+ 	 * compressed set of values. This is equivalent to hashing
+ 	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
+ 	 * hash though not minimal since some levels might contain a number
+ 	 * of CPUs that is not an exact power of 2 and their bit
+ 	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
  	 */
 +	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
 +	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
 +	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
 +				  (bits[1] + bits[0]);
 +	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
 +				  fs[3] - (bits[2] + bits[1] + bits[0]);
 +	mpidr_hash.mask = mask;
 +	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
 +	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
 +		mpidr_hash.shift_aff[0],
 +		mpidr_hash.shift_aff[1],
 +		mpidr_hash.shift_aff[2],
 +		mpidr_hash.shift_aff[3],
 +		mpidr_hash.mask,
 +		mpidr_hash.bits);
 +	/*
 +	 * 4x is an arbitrary value used to warn on a hash table much bigger
 +	 * than expected on most systems.
 +	 */
 +	if (mpidr_hash_size() > 4 * num_possible_cpus())
 +		pr_warn("Large number of MPIDR hash buckets detected\n");
 +	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 +}
 +#endif
 +
 +static void __init setup_processor(void)
 +{
 +	struct cpu_info *cpu_info;
 +	u64 features, block;
 +
  	cpu_info = lookup_processor_type(read_cpuid_id());
  	if (!cpu_info) {
  		printk("CPU configuration botched (ID %08x), unable to continue.\n",
@@@ -414,10 -304,8 +430,8 @@@ static int __init arm64_device_init(voi
  	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  	return 0;
  }
 -arch_initcall(arm64_device_init);
 +arch_initcall_sync(arm64_device_init);
  
- static DEFINE_PER_CPU(struct cpu, cpu_data);
- 
  static int __init topology_init(void)
  {
  	int i;
diff --cc arch/arm64/kernel/smp.c
index b6bc1a4a925c,b0a8703a25ec..7d850f9a0c2b
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@@ -154,9 -187,23 +154,14 @@@ asmlinkage void __cpuinit secondary_sta
  	preempt_disable();
  	trace_hardirqs_off();
  
 -	/*
 -	 * Let the primary processor know we're out of the
 -	 * pen, then head off into the C entry point
 -	 */
 -	write_pen_release(INVALID_HWID);
 -
 -	/*
 -	 * Synchronise with the boot thread.
 -	 */
 -	raw_spin_lock(&boot_lock);
 -	raw_spin_unlock(&boot_lock);
 +	if (cpu_ops[cpu]->cpu_postboot)
 +		cpu_ops[cpu]->cpu_postboot();
+ 	/*
+ 	 * Log the CPU info before it is marked online and might get read.
+ 	 */
+ 	cpuinfo_store_cpu();
+ 
  	/*
  	 * OK, now it's safe to let the boot CPU continue. Wait for
  	 * the CPU migration code to notice that the CPU is online
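---

The smp_build_mpidr_hash() routine carried in the setup.c hunk above packs the bits of MPIDR_EL1 that actually vary between CPUs into a small collision-free index, as its comment block explains. The stand-alone C sketch below reruns the same arithmetic in user space so the shifts can be inspected directly. It is an illustration only, not kernel code: the four MPIDR values are invented, ffs()/fls() are stood in for by GCC builtins, and mpidr_hash_index() mirrors the mask/shift/OR sequence the kernel's resume assembly performs with shift_aff[].

/* mpidr_hash_demo.c - hypothetical user-space sketch, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS_SHIFT	3
#define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
/* Affinity fields 0..3 sit at bit offsets 0, 8, 16 and 32 of MPIDR_EL1 */
#define MPIDR_LEVEL_SHIFT(level) \
	((((uint64_t)1 << (level)) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

/* Stand-ins for the kernel's ffs()/fls() bitops */
static int ffs32(uint32_t x) { return x ? __builtin_ctz(x) + 1 : 0; }
static int fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }

struct mpidr_hash { uint64_t mask; uint32_t shift_aff[4]; uint32_t bits; };

/* Same isolate-field, shift-down, OR-together step the resume path does */
static unsigned int mpidr_hash_index(const struct mpidr_hash *h, uint64_t mpidr)
{
	uint64_t m = mpidr & h->mask;
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 4; i++)
		idx |= (m & ((uint64_t)MPIDR_LEVEL_MASK << MPIDR_LEVEL_SHIFT(i)))
		       >> h->shift_aff[i];
	return idx;
}

int main(void)
{
	/* Invented topology: two clusters (Aff1) of two CPUs each (Aff0) */
	uint64_t cpus[] = { 0x000, 0x001, 0x100, 0x101 };
	int ncpus = sizeof(cpus) / sizeof(cpus[0]), i;
	struct mpidr_hash h = { 0 };
	uint64_t mask = 0;
	uint32_t fs[4], bits[4];

	/* Keep only the MPIDR bits that ever toggle across CPUs */
	for (i = 0; i < ncpus; i++)
		mask |= cpus[i] ^ cpus[0];

	/* Width and position of the useful bits at each affinity level */
	for (i = 0; i < 4; i++) {
		uint32_t affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		int ls = fls32(affinity);

		fs[i] = affinity ? ffs32(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}

	/* Shifts that pack the surviving bits into one dense index */
	h.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	h.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	h.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - (bits[1] + bits[0]);
	h.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + fs[3] -
			 (bits[2] + bits[1] + bits[0]);
	h.mask = mask;
	h.bits = bits[3] + bits[2] + bits[1] + bits[0];

	printf("mask %#llx, %u hash buckets\n",
	       (unsigned long long)h.mask, 1u << h.bits);
	for (i = 0; i < ncpus; i++)
		printf("MPIDR %#05llx -> index %u\n",
		       (unsigned long long)cpus[i],
		       mpidr_hash_index(&h, cpus[i]));
	return 0;
}

With these made-up inputs the toggling mask comes out as 0x101 and the four CPUs hash to the dense indices 0 through 3; keeping that index space small is exactly what the "4x" bucket warning in the merged code is guarding against.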