/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
14 | ||
15 | #include <linux/linkage.h> | |
0707ad30 | 16 | #include <linux/unistd.h> |
867e359b | 17 | #include <asm/irqflags.h> |
a78c942d | 18 | #include <asm/processor.h> |
0707ad30 | 19 | #include <arch/abi.h> |
a78c942d | 20 | #include <arch/spr_def.h> |
867e359b CM |
21 | |
#ifdef __tilegx__
/*
 * TILE-Gx renamed the "branch if nonzero, predict taken" mnemonic to
 * "bnezt"; alias the TILEPro spelling "bnzt" so this file assembles
 * unchanged on both ISAs.
 */
#define bnzt bnezt
#endif
25 | ||
/*
 * unsigned long current_text_addr(void)
 *
 * Return the program counter of the call site.  On entry lr holds the
 * caller's return address, so a single bundle copies it to the return
 * register (r0) and returns through lr.
 */
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)
29 | ||
/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process.  See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
	/*
	 * Placed in .data (not .text) because this code is only a template
	 * that gets copied out to a user-mapped page; it is never executed
	 * at this address.
	 */
	.pushsection .data
ENTRY(__rt_sigreturn)
	/* Load the rt_sigreturn syscall number into the syscall register
	   and trap into the kernel via software interrupt 1. */
	moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
	swint1
	ENDPROC(__rt_sigreturn)
/* End marker: lets vdso_setup() compute how many bytes to copy. */
ENTRY(__rt_sigreturn_end)
	.popsection
45 | ||
/*
 * void dump_stack(void)
 *
 * Capture the current pc/lr/sp/r52 and tail-call the C helper
 * _dump_stack with them (r0 is passed through untouched).  lnk writes
 * the address of the following bundle into r1; the addli then adds
 * "dump_stack - ." so that r1 ends up holding dump_stack's own runtime
 * address, computed position-independently.
 * NOTE(review): r52 appears to be the frame-pointer register in this
 * ABI — confirm against the TILE calling convention.
 */
STD_ENTRY(dump_stack)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, dump_stack - . }
	{ move r3, sp; j _dump_stack }
	jrp lr /* keep backtracer happy */
	STD_ENDPROC(dump_stack)
52 | ||
/*
 * Initialize a backtrace iterator for the current task.
 *
 * Same trampoline pattern as dump_stack above: snapshot pc/lr/sp/r52
 * into r1-r4 (r0, the iterator argument, passes through unchanged) and
 * tail-call the C implementation _KBacktraceIterator_init_current.
 * The lnk/addli pair computes this function's own address into r1
 * position-independently.
 */
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)
867e359b CM |
59 | |
/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	nop /* avoid provoking the icache prefetch with a jump */
	/* nap may return spuriously, so re-enter it in a loop. */
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr /* clue in the backtracer */
	STD_ENDPROC(smp_nap)
67 | ||
/*
 * Enable interrupts racelessly and then nap until interrupted.
 * Architecturally, we are guaranteed that enabling interrupts via
 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY(_cpu_idle)
	movei r1, 1
	IRQ_ENABLE_LOAD(r2, r3)
	/* Set ICS=1 so the unmask below cannot be interrupted early. */
	mtspr INTERRUPT_CRITICAL_SECTION, r1
	IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
	/*
	 * Clear ICS: an interrupt can now be taken, but only starting at
	 * the very next instruction — i.e. exactly at _cpu_idle_nap, which
	 * is the address intvec.S special-cases (bumping the PC past the
	 * nap/nop pair on wakeup).
	 */
	mtspr INTERRUPT_CRITICAL_SECTION, zero
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	nop /* avoid provoking the icache prefetch with a jump */
	jrp lr
	STD_ENDPROC(_cpu_idle)