From: Glauber Costa
Date: Thu, 3 Jul 2008 15:35:41 +0000 (-0300)
Subject: x86: integrate delay functions.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f0fbf0abc093ec8bf64506eee4ede9e5daf40ffd;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

x86: integrate delay functions.

delay_32.c and delay_64.c are now equal, and are integrated into delay.c.

Signed-off-by: Glauber Costa
Signed-off-by: H. Peter Anvin
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 76f60f52a885..86960a6c41c0 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -4,7 +4,7 @@
 obj-$(CONFIG_SMP) := msr-on-cpu.o
 
-lib-y := delay_$(BITS).o
+lib-y := delay.o
 lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
 lib-y += memcpy_$(BITS).o
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
new file mode 100644
index 000000000000..f4568605d7d5
--- /dev/null
+++ b/arch/x86/lib/delay.c
@@ -0,0 +1,137 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares
+ * Copyright (C) 2008 Jiri Hladky
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
+#include <linux/preempt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+#include <asm/timer.h>
+
+#ifdef CONFIG_SMP
+# include <asm/smp.h>
+#endif
+
+/* simple loop based delay: */
+static void delay_loop(unsigned long loops)
+{
+        asm volatile(
+                " test %0,%0 \n"
+                " jz 3f \n"
+                " jmp 1f \n"
+
+                ".align 16 \n"
+                "1: jmp 2f \n"
+
+                ".align 16 \n"
+                "2: dec %0 \n"
+                " jnz 2b \n"
+                "3: dec %0 \n"
+
+                : /* we don't need output */
+                :"a" (loops)
+        );
+}
+
+/* TSC based delay: */
+static void delay_tsc(unsigned long loops)
+{
+        unsigned long bclock, now;
+        int cpu;
+
+        preempt_disable();
+        cpu = smp_processor_id();
+        rdtscl(bclock);
+        for (;;) {
+                rdtscl(now);
+                if ((now - bclock) >= loops)
+                        break;
+
+                /* Allow RT tasks to run */
+                preempt_enable();
+                rep_nop();
+                preempt_disable();
+
+                /*
+                 * It is possible that we moved to another CPU, and
+                 * since TSC's are per-cpu we need to calculate
+                 * that. The delay must guarantee that we wait "at
+                 * least" the amount of time. Being moved to another
+                 * CPU could make the wait longer but we just need to
+                 * make sure we waited long enough. Rebalance the
+                 * counter for this CPU.
+                 */
+                if (unlikely(cpu != smp_processor_id())) {
+                        loops -= (now - bclock);
+                        cpu = smp_processor_id();
+                        rdtscl(bclock);
+                }
+        }
+        preempt_enable();
+}
+
+/*
+ * Since we calibrate only once at boot, this
+ * function should be set once at boot and not changed
+ */
+static void (*delay_fn)(unsigned long) = delay_loop;
+
+void use_tsc_delay(void)
+{
+        delay_fn = delay_tsc;
+}
+
+int __devinit read_current_timer(unsigned long *timer_val)
+{
+        if (delay_fn == delay_tsc) {
+                rdtscll(*timer_val);
+                return 0;
+        }
+        return -1;
+}
+
+void __delay(unsigned long loops)
+{
+        delay_fn(loops);
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+        int d0;
+
+        xloops *= 4;
+        asm("mull %%edx"
+                :"=d" (xloops), "=&a" (d0)
+                :"1" (xloops), "0"
+                (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
+
+        __delay(++xloops);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+        __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
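The arithmetic in __udelay()/__const_udelay() above is easier to follow with explicit 64-bit math: 0x000010c7 is 4295, i.e. 2**32/10**6 rounded up, so xloops becomes the requested time as a 32.32 fixed-point fraction of a second; the mull then scales by loops-per-second (loops_per_jiffy * HZ, split into *4 and HZ/4 so the constant operand stays within 32 bits) and keeps only the high 32 bits of the product. A minimal userspace sketch of the same computation -- HZ_ASSUMED, LPJ_ASSUMED and usecs_to_loops() are invented for illustration, not kernel API:

/*
 * Userspace sketch of the __udelay()/__const_udelay() arithmetic.
 * HZ_ASSUMED and LPJ_ASSUMED stand in for the kernel's HZ and the
 * boot-time calibrated loops_per_jiffy; both values are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ_ASSUMED  250ULL
#define LPJ_ASSUMED 4998000ULL

static unsigned long usecs_to_loops(unsigned long usecs)
{
        /* __udelay(): microseconds -> 32.32 fixed-point seconds */
        uint64_t xloops = (uint64_t)usecs * 0x000010c7; /* 2**32/10**6, rounded up */

        /*
         * __const_udelay(): mull keeps the high 32 bits of
         * xloops * loops-per-second; the kernel computes the product as
         * (xloops * 4) * (loops_per_jiffy * (HZ/4)) to avoid overflow.
         */
        return (unsigned long)((xloops * (LPJ_ASSUMED * HZ_ASSUMED)) >> 32) + 1;
}

int main(void)
{
        /* at 1249500000 loops/s, 1000 us should be about 1249500 loops */
        printf("1000 us -> %lu loops (expected ~%llu)\n",
               usecs_to_loops(1000),
               (unsigned long long)(LPJ_ASSUMED * HZ_ASSUMED / 1000));
        return 0;
}

The trailing +1 mirrors __delay(++xloops): rounding up guarantees the delay is never shorter than requested.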
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
deleted file mode 100644
index 0b659a320b1e..000000000000
--- a/arch/x86/lib/delay_32.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Precise Delay Loops for i386
- *
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares
- * Copyright (C) 2008 Jiri Hladky
- *
- * The __delay function must _NOT_ be inlined as its execution time
- * depends wildly on alignment on many x86 processors. The additional
- * jump magic is needed to get the timing stable on all the CPU's
- * we have to worry about.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
-#include <linux/preempt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/timer.h>
-
-#ifdef CONFIG_SMP
-# include <asm/smp.h>
-#endif
-
-/* simple loop based delay: */
-static void delay_loop(unsigned long loops)
-{
-        __asm__ __volatile__(
-                " test %0,%0 \n"
-                " jz 3f \n"
-                " jmp 1f \n"
-
-                ".align 16 \n"
-                "1: jmp 2f \n"
-
-                ".align 16 \n"
-                "2: dec %0 \n"
-                " jnz 2b \n"
-                "3: dec %0 \n"
-
-                : /* we don't need output */
-                :"a" (loops)
-        );
-}
-
-/* TSC based delay: */
-static void delay_tsc(unsigned long loops)
-{
-        unsigned long bclock, now;
-        int cpu;
-
-        preempt_disable();
-        cpu = smp_processor_id();
-        rdtscl(bclock);
-        for (;;) {
-                rdtscl(now);
-                if ((now - bclock) >= loops)
-                        break;
-
-                /* Allow RT tasks to run */
-                preempt_enable();
-                rep_nop();
-                preempt_disable();
-
-                /*
-                 * It is possible that we moved to another CPU, and
-                 * since TSC's are per-cpu we need to calculate
-                 * that. The delay must guarantee that we wait "at
-                 * least" the amount of time. Being moved to another
-                 * CPU could make the wait longer but we just need to
-                 * make sure we waited long enough. Rebalance the
-                 * counter for this CPU.
-                 */
-                if (unlikely(cpu != smp_processor_id())) {
-                        loops -= (now - bclock);
-                        cpu = smp_processor_id();
-                        rdtscl(bclock);
-                }
-        }
-        preempt_enable();
-}
-
-/*
- * Since we calibrate only once at boot, this
- * function should be set once at boot and not changed
- */
-static void (*delay_fn)(unsigned long) = delay_loop;
-
-void use_tsc_delay(void)
-{
-        delay_fn = delay_tsc;
-}
-
-int __devinit read_current_timer(unsigned long *timer_val)
-{
-        if (delay_fn == delay_tsc) {
-                rdtscll(*timer_val);
-                return 0;
-        }
-        return -1;
-}
-
-void __delay(unsigned long loops)
-{
-        delay_fn(loops);
-}
-
-inline void __const_udelay(unsigned long xloops)
-{
-        int d0;
-
-        xloops *= 4;
-        __asm__("mull %%edx"
-                :"=d" (xloops), "=&a" (d0)
-                :"1" (xloops), "0"
-                (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
-
-        __delay(++xloops);
-}
-
-void __udelay(unsigned long usecs)
-{
-        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
-}
-
-void __ndelay(unsigned long nsecs)
-{
-        __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
-}
-
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
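The delay_tsc() loop deleted above (and carried over verbatim into delay.c) can also be tried in userspace. A sketch assuming GCC on x86 -- delay_tsc_user() is a hypothetical name; __rdtsc() and _mm_pause() come from x86intrin.h. It deliberately omits the preempt_disable()/migration-rebalance logic the kernel comment explains, and it uses the full 64-bit counter where the kernel's rdtscl() reads only the low 32 bits and relies on unsigned wraparound in (now - bclock):

/*
 * Userspace analogue of the delay_tsc() loop (hypothetical helper,
 * not kernel API): spin until the TSC has advanced by 'loops' cycles.
 */
#include <x86intrin.h> /* __rdtsc(), _mm_pause() */

static void delay_tsc_user(unsigned long long loops)
{
        unsigned long long bclock = __rdtsc();

        while (__rdtsc() - bclock < loops)
                _mm_pause(); /* same busy-wait hint as the kernel's rep_nop() */
}

int main(void)
{
        delay_tsc_user(1000000); /* burn roughly a million TSC cycles */
        return 0;
}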
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
deleted file mode 100644
index ff3dfecdb6f9..000000000000
--- a/arch/x86/lib/delay_64.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Precise Delay Loops for x86-64
- *
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares
- *
- * The __delay function must _NOT_ be inlined as its execution time
- * depends wildly on alignment on many x86 processors.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
-#include <linux/preempt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-#include <asm/delay.h>
-#include <asm/msr.h>
-
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-/* simple loop based delay: */
-static void delay_loop(unsigned long loops)
-{
-        asm volatile(
-                " test %0,%0 \n"
-                " jz 3f \n"
-                " jmp 1f \n"
-
-                ".align 16 \n"
-                "1: jmp 2f \n"
-
-                ".align 16 \n"
-                "2: dec %0 \n"
-                " jnz 2b \n"
-                "3: dec %0 \n"
-
-                : /* we don't need output */
-                :"a" (loops)
-        );
-}
-
-static void delay_tsc(unsigned long loops)
-{
-        unsigned bclock, now;
-        int cpu;
-
-        preempt_disable();
-        cpu = smp_processor_id();
-        rdtscl(bclock);
-        for (;;) {
-                rdtscl(now);
-                if ((now - bclock) >= loops)
-                        break;
-
-                /* Allow RT tasks to run */
-                preempt_enable();
-                rep_nop();
-                preempt_disable();
-
-                /*
-                 * It is possible that we moved to another CPU, and
-                 * since TSC's are per-cpu we need to calculate
-                 * that. The delay must guarantee that we wait "at
-                 * least" the amount of time. Being moved to another
-                 * CPU could make the wait longer but we just need to
-                 * make sure we waited long enough. Rebalance the
-                 * counter for this CPU.
- */ - if (unlikely(cpu != smp_processor_id())) { - loops -= (now - bclock); - cpu = smp_processor_id(); - rdtscl(bclock); - } - } - preempt_enable(); -} - -static void (*delay_fn)(unsigned long) = delay_loop; - -void use_tsc_delay(void) -{ - delay_fn = delay_tsc; -} - -int __devinit read_current_timer(unsigned long *timer_value) -{ - if (delay_fn == delay_tsc) { - rdtscll(*timer_value); - return 0; - } - return -1; -} - -void __delay(unsigned long loops) -{ - delay_fn(loops); -} -EXPORT_SYMBOL(__delay); - -inline void __const_udelay(unsigned long xloops) -{ - int d0; - xloops *= 4; - __asm__("mull %%edx" - :"=d" (xloops), "=&a" (d0) - :"1" (xloops), "0" - (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4))); - - __delay(++xloops); -} - -EXPORT_SYMBOL(__const_udelay); - -void __udelay(unsigned long usecs) -{ - __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ -} -EXPORT_SYMBOL(__udelay); - -void __ndelay(unsigned long nsecs) -{ - __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ -} -EXPORT_SYMBOL(__ndelay);
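For context, these exported functions sit underneath the driver-facing macros: on x86, udelay() from linux/delay.h (via asm/delay.h) expands to __const_udelay() when its argument is a compile-time constant and to __udelay() otherwise, and ndelay() similarly reaches __ndelay(). A hypothetical caller -- the device and its timing values are invented for illustration:

/*
 * Hypothetical driver snippet: the usual consumers of the
 * primitives exported above. Timing values are made up.
 */
#include <linux/delay.h>

static void example_chip_reset(void)
{
        /* pretend the datasheet asks for >= 20 us after asserting reset */
        udelay(20);  /* constant argument: becomes __const_udelay() on x86 */
        ndelay(500); /* sub-microsecond settle time, via __ndelay() */
}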