From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date: Wed, 16 Jul 2008 01:07:59 +0000 (+1000)
Subject: Merge commit 'origin/master'
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=84c3d4aaec3338201b449034beac41635866bddf;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

Merge commit 'origin/master'

Manual merge of:

	arch/powerpc/Kconfig
	arch/powerpc/kernel/stacktrace.c
	arch/powerpc/mm/slice.c
	arch/ppc/kernel/smp.c
---

84c3d4aaec3338201b449034beac41635866bddf
diff --cc arch/powerpc/Kconfig
index 1530a41fc56d,20eacf2a8424..de88972c5896
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@@ -114,7 -111,7 +114,8 @@@ config PP
  	select HAVE_KPROBES
  	select HAVE_KRETPROBES
  	select HAVE_LMB
 +	select HAVE_DMA_ATTRS if PPC64
+ 	select USE_GENERIC_SMP_HELPERS if SMP
  	select HAVE_OPROFILE
  
  config EARLY_PRINTK
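The newly selected USE_GENERIC_SMP_HELPERS is what enables the large deletion
in smp.c below: kernel/smp.c now implements smp_call_function() and friends,
and the architecture only has to supply the IPI plumbing. A minimal sketch of
that contract follows; the two hook names are the real generic-helper entry
points of this period, but the bodies are a simplified rendition of what the
powerpc conversion installs, not a quote from this patch (smp_ops and the
PPC_MSG_* message types come from asm/smp.h):

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/smp.h>

/* Hooks that the generic helpers in kernel/smp.c expect each
 * architecture to provide.  On powerpc both end up as thin wrappers
 * around the platform's message-passing primitive. */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

On the receiving side, the IPI handlers simply call back into
generic_smp_call_function_interrupt() and
generic_smp_call_function_single_interrupt(), which is what replaces the
hand-rolled smp_call_function_interrupt() removed below.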
diff --cc arch/powerpc/kernel/smp.c
index ba7989ffaeee,5191b46a611e..f5ae9fa222ea
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@@ -154,219 -166,17 +166,13 @@@ static void stop_this_cpu(void *dummy
  	;
  }
  
- /*
-  * Structure and data for smp_call_function(). This is designed to minimise
-  * static memory requirements. It also looks cleaner.
-  * Stolen from the i386 version.
-  */
- static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
- 
- static struct call_data_struct {
- 	void (*func) (void *info);
- 	void *info;
- 	atomic_t started;
- 	atomic_t finished;
- 	int wait;
- } *call_data;
- 
- /* delay of at least 8 seconds */
- #define SMP_CALL_TIMEOUT	8
- 
- /*
-  * These functions send a 'generic call function' IPI to other online
-  * CPUS in the system.
-  *
-  * [SUMMARY] Run a function on other CPUs.
-  * <func> The function to run. This must be fast and non-blocking.
-  * <info> An arbitrary pointer to pass to the function.
-  * <nonatomic> currently unused.
-  * <wait> If true, wait (atomically) until function has completed on other CPUs.
-  * [RETURNS] 0 on success, else a negative status code. Does not return until
-  * remote CPUs are nearly ready to execute <<func>> or are or have executed.
-  * <map> is a cpu map of the cpus to send IPI to.
-  *
-  * You must not call this function with disabled interrupts or from a
-  * hardware interrupt handler or from a bottom half handler.
-  */
- static int __smp_call_function_map(void (*func) (void *info), void *info,
- 				   int nonatomic, int wait, cpumask_t map)
- {
- 	struct call_data_struct data;
- 	int ret = -1, num_cpus;
- 	int cpu;
- 	u64 timeout;
- 
- 	if (unlikely(smp_ops == NULL))
- 		return ret;
- 
- 	data.func = func;
- 	data.info = info;
- 	atomic_set(&data.started, 0);
- 	data.wait = wait;
- 	if (wait)
- 		atomic_set(&data.finished, 0);
- 
- 	/* remove 'self' from the map */
- 	if (cpu_isset(smp_processor_id(), map))
- 		cpu_clear(smp_processor_id(), map);
- 
- 	/* sanity check the map, remove any non-online processors.
- 	 */
- 	cpus_and(map, map, cpu_online_map);
- 
- 	num_cpus = cpus_weight(map);
- 	if (!num_cpus)
- 		goto done;
- 
- 	call_data = &data;
- 	smp_wmb();
- 	/* Send a message to all CPUs in the map */
- 	for_each_cpu_mask(cpu, map)
- 		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
- 
- 	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
- 
- 	/* Wait for indication that they have received the message */
- 	while (atomic_read(&data.started) != num_cpus) {
- 		HMT_low();
- 		if (get_tb() >= timeout) {
- 			printk("smp_call_function on cpu %d: other cpus not "
- 			       "responding (%d)\n", smp_processor_id(),
- 			       atomic_read(&data.started));
- 			if (!ipi_fail_ok)
- 				debugger(NULL);
- 			goto out;
- 		}
- 	}
- 
- 	/* optionally wait for the CPUs to complete */
- 	if (wait) {
- 		while (atomic_read(&data.finished) != num_cpus) {
- 			HMT_low();
- 			if (get_tb() >= timeout) {
- 				printk("smp_call_function on cpu %d: other "
- 				       "cpus not finishing (%d/%d)\n",
- 				       smp_processor_id(),
- 				       atomic_read(&data.finished),
- 				       atomic_read(&data.started));
- 				debugger(NULL);
- 				goto out;
- 			}
- 		}
- 	}
- 
-  done:
- 	ret = 0;
- 
-  out:
- 	call_data = NULL;
- 	HMT_medium();
- 	return ret;
- }
- 
- static int __smp_call_function(void (*func)(void *info), void *info,
- 			       int nonatomic, int wait)
- {
- 	int ret;
- 	spin_lock(&call_lock);
- 	ret =__smp_call_function_map(func, info, nonatomic, wait,
- 				     cpu_online_map);
- 	spin_unlock(&call_lock);
- 	return ret;
- }
- 
- int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- 		      int wait)
- {
- 	/* Can deadlock when called with interrupts disabled */
- 	WARN_ON(irqs_disabled());
- 
- 	return __smp_call_function(func, info, nonatomic, wait);
- }
- EXPORT_SYMBOL(smp_call_function);
- 
- int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- 			     int nonatomic, int wait)
- {
- 	cpumask_t map = CPU_MASK_NONE;
- 	int ret = 0;
- 
- 	/* Can deadlock when called with interrupts disabled */
- 	WARN_ON(irqs_disabled());
- 
- 	if (!cpu_online(cpu))
- 		return -EINVAL;
- 
- 	cpu_set(cpu, map);
- 	if (cpu != get_cpu()) {
- 		spin_lock(&call_lock);
- 		ret = __smp_call_function_map(func, info, nonatomic, wait, map);
- 		spin_unlock(&call_lock);
- 	} else {
- 		local_irq_disable();
- 		func(info);
- 		local_irq_enable();
- 	}
- 	put_cpu();
- 	return ret;
- }
- EXPORT_SYMBOL(smp_call_function_single);
- 
  void smp_send_stop(void)
  {
- 	int nolock;
- 
- 	/* It's OK to fail sending the IPI, since the alternative is to
- 	 * be stuck forever waiting on the other CPU to take the interrupt.
- 	 *
- 	 * It's better to at least continue and go through reboot, since this
- 	 * function is usually called at panic or reboot time in the first
- 	 * place.
- 	 */
- 	ipi_fail_ok = 1;
- 
- 	/* Don't deadlock in case we got called through panic */
- 	nolock = !spin_trylock(&call_lock);
- 	__smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
- 	if (!nolock)
- 		spin_unlock(&call_lock);
- }
- 
- void smp_call_function_interrupt(void)
- {
- 	void (*func) (void *info);
- 	void *info;
- 	int wait;
- 
- 	/* call_data will be NULL if the sender timed out while
- 	 * waiting on us to receive the call.
- 	 */
- 	if (!call_data)
- 		return;
- 
- 	func = call_data->func;
- 	info = call_data->info;
- 	wait = call_data->wait;
- 
- 	if (!wait)
- 		smp_mb__before_atomic_inc();
- 
- 	/*
- 	 * Notify initiating CPU that I've grabbed the data and am
- 	 * about to execute the function
- 	 */
- 	atomic_inc(&call_data->started);
- 	/*
- 	 * At this point the info structure may be out of scope unless wait==1
- 	 */
- 	(*func)(info);
- 	if (wait) {
- 		smp_mb__before_atomic_inc();
- 		atomic_inc(&call_data->finished);
- 	}
+ 	smp_call_function(stop_this_cpu, NULL, 0);
  }
  
 -extern struct gettimeofday_struct do_gtod;
 -
  struct thread_info *current_set[NR_CPUS];
  
 -DECLARE_PER_CPU(unsigned int, pvr);
 -
  static void __devinit smp_store_cpu_info(int id)
  {
  	per_cpu(pvr, id) = mfspr(SPRN_PVR);
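With the map-based machinery gone, the visible API change for callers is the
dropped 'nonatomic' flag: smp_call_function() and smp_call_function_single()
now take (func, info, wait), exactly as in the rewritten smp_send_stop()
above. A minimal, hypothetical caller as a sketch (do_flush() and
flush_all_cpus() are invented names, not from this patch):

#include <linux/smp.h>

/* Runs on every other online CPU, in interrupt context, so it must
 * be fast and must not block. */
static void do_flush(void *info)
{
}

static void flush_all_cpus(void)
{
	/* Old API: smp_call_function(do_flush, NULL, 0, 1);
	 * the unused 'nonatomic' argument is gone, '1' means wait. */
	smp_call_function(do_flush, NULL, 1);
}

The timeout and debugger() handling that __smp_call_function_map() did by
hand also disappears; the generic version simply spins until the target CPUs
acknowledge.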
diff --cc arch/powerpc/kernel/stacktrace.c
index 6a4fb003fa54,3cf0d94ba340..071bee3ec749
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@@ -10,11 -10,10 +10,12 @@@
   * 2 of the License, or (at your option) any later version.
   */
  
 +#include <linux/module.h>
  #include <linux/sched.h>
  #include <linux/stacktrace.h>
+ #include <linux/module.h>
  #include <asm/ptrace.h>
 +#include <asm/processor.h>
  
  /*
   * Save stack-backtrace addresses into a stack_trace buffer.
diff --cc arch/powerpc/mm/slice.c
index 583be67ad938,2bd12d965db1..db44e02e045b
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@@ -551,13 -498,7 +551,13 @@@ unsigned long slice_get_unmapped_area(u
  	slice_print_mask(" mask", mask);
  
   convert:
- 	slice_convert(mm, mask, psize);
 +	andnot_mask(mask, good_mask);
 +	andnot_mask(mask, compat_mask);
 +	if (mask.low_slices || mask.high_slices) {
 +		slice_convert(mm, mask, psize);
 +		if (psize > MMU_PAGE_BASE)
- 			on_each_cpu(slice_flush_segments, mm, 0, 1);
++			on_each_cpu(slice_flush_segments, mm, 1);
 +	}
  	return addr;
  }
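The slice.c conflict is the same API change seen from the caller's side: the
powerpc branch moved the segment flush out into slice_get_unmapped_area(),
while the generic-IPI series dropped the unused 'retry' argument from
on_each_cpu(), so the hand-merged '++' line takes (func, info, wait). As a
sketch of the new signature (the counting helpers below are invented for the
example; slice_flush_segments is the real powerpc callback):

#include <linux/smp.h>
#include <asm/atomic.h>

/* Runs on each online CPU, including the caller. */
static void count_self(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int count_cpus_via_ipi(void)
{
	atomic_t n = ATOMIC_INIT(0);

	/* Old API: on_each_cpu(count_self, &n, 0, 1); the 'retry'
	 * argument is gone, and '1' still means wait for completion,
	 * which keeps &n safely in scope while remote CPUs use it. */
	on_each_cpu(count_self, &n, 1);
	return atomic_read(&n);
}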