/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <linux/atomic.h>

/*
 * Private routines/data
 */

extern unsigned char boot_cpu_id;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;

typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                          unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);

/*
 * General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

void smp_resched_interrupt(void);
void smp_call_function_single_interrupt(void);
void smp_call_function_interrupt(void);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

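/*
 * Platform-specific SMP primitives, bound at boot via btfixup: the raw
 * cross call, the hardware cpu id lookup, and the IPI senders that end
 * up in the smp_*_interrupt() handlers declared above.
 */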
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
BTFIXUPDEF_CALL(void, smp_ipi_single, int);
BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)

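/*
 * xc0()..xc4() are convenience wrappers that cross call func on every
 * online cpu with zero to four unsigned long arguments.
 */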
static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

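/* cpu_logical_map() is an identity mapping on sparc32. */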
static inline int cpu_logical_map(int cpu)
{
        return cpu;
}

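/* sun4m: the cpu number is encoded in bits 13:12 of %tbr. */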
static inline int hard_smp4m_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("rd %%tbr, %0\n\t"
                             "srl %0, 12, %0\n\t"
                             "and %0, 3, %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}

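/* sun4d: the cpu number is stashed in the per-cpu Viking TMP1 register. */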
static inline int hard_smp4d_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
                             "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
        return cpuid;
}

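/* LEON: the cpu index sits in the upper four bits of %asr17. */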
extern inline int hard_smpleon_processor_id(void)
{
        int cpuid;
        __asm__ __volatile__("rd %%asr17,%0\n\t"
                             "srl %0,28,%0" :
                             "=&r" (cpuid) : );
        return cpuid;
}

#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        /* Black box - sun4m
                __asm__ __volatile__("rd %%tbr, %0\n\t"
                                     "srl %0, 12, %0\n\t"
                                     "and %0, 3, %0\n\t" :
                                     "=&r" (cpuid));
           - sun4d
                __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
                                     "nop; nop" :
                                     "=&r" (cpuid));
           - leon
                __asm__ __volatile__("rd %asr17, %0\n\t"
                                     "srl %0, 0x1c, %0\n\t"
                                     "nop\n\t" :
                                     "=&r" (cpuid));
           See btfixup.h and btfixupprep.c to understand how a blackbox works.
         */
        __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
                             "sethi %%hi(boot_cpu_id), %0\n\t"
                             "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}
#else
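/*
 * Modules cannot be patched by the boot-time blackbox fixup, so they go
 * through the btfixup call stub instead; the stub hands back the cpu id
 * in %g2.
 */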
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("mov %%o7, %%g1\n\t"
                             "call ___f___hard_smp_processor_id\n\t"
                             " nop\n\t"
                             "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
        return cpuid;
}
#endif

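/* The cpu number is cached in thread_info, so this is just a load. */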
#define raw_smp_processor_id()          (current_thread_info()->cpu)

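/* Profiling tick bookkeeping, kept per-cpu in cpu_data(). */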
#define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
#define prof_counter(__cpu)             cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL          0x0005  /* run func on cpus */

/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor. This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU            0xFB
#define MBOX_IDLECPU            0xFC
#define MBOX_IDLECPU2           0xFD
#define MBOX_STOPCPU2           0xFE

#else /* SMP */

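/* UP: the boot processor is the only cpu and there is nothing to set up. */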
#define hard_smp_processor_id()         0
#define smp_setup_cpu_possible_map()    do { } while (0)

#endif /* !(SMP) */
#endif /* !(_SPARC_SMP_H) */