/*
 * arch/arm/mach-mt8127/mt-smp.c
 * (android_kernel_alcatel_ttab, branch PULS_20160108)
 */
1 #include <linux/init.h>
2 #include <linux/smp.h>
3 #include <linux/spinlock.h>
4 #include <linux/jiffies.h>
5 #include <linux/delay.h>
6 #include <asm/localtimer.h>
7 #if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
8 #include <asm/hardware/gic.h>
9 #include <mach/mtk_boot_share_page.h>
10 #endif
11 #include <asm/fiq_glue.h>
12 #include <mach/mt_reg_base.h>
13 #include <mach/smp.h>
14 #include <mach/sync_write.h>
15 #include <mach/hotplug.h>
16 #ifdef CONFIG_HOTPLUG_WITH_POWER_CTRL
17 #include <mach/mt_spm_mtcmos.h>
18 #endif
19 #include <mach/mt_spm_idle.h>
20 #include <mach/wd_api.h>
21
22 #if defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
23 #include <mach/mt_secure_api.h>
24 #endif
25
26 #if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
27 #include <trustzone/kree/tz_pm.h>
28 #include <mach/mtk_boot_share_page.h>
29 #endif
30
31 #include <linux/version.h>
32
33 #if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
34 #define SLAVE1_MAGIC_REG (0xF0002000 + 0x38)
35 #define SLAVE2_MAGIC_REG (0xF0002000 + 0x3C)
36 #define SLAVE3_MAGIC_REG (0xF0002000 + 0x40)
37 #else
38 #define SLAVE1_MAGIC_REG (SRAMROM_BASE+0x38)
39 #define SLAVE2_MAGIC_REG (SRAMROM_BASE+0x3C)
40 #define SLAVE3_MAGIC_REG (SRAMROM_BASE+0x40)
41 #endif
42
43 #define SLAVE1_MAGIC_NUM 0x534C4131
44 #define SLAVE2_MAGIC_NUM 0x4C415332
45 #define SLAVE3_MAGIC_NUM 0x41534C33
46 #define SLAVE_JUMP_REG (SRAMROM_BASE+0x34)
47
48 #if defined(CONFIG_TRUSTONIC_TEE_SUPPORT) || defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
49 // Secure world location, using SRAM now.
50 #define NS_SLAVE_JUMP_REG (BOOT_SHARE_BASE+1020)
51 #define NS_SLAVE_MAGIC_REG (BOOT_SHARE_BASE+1016)
52 #define NS_SLAVE_BOOT_ADDR (BOOT_SHARE_BASE+1012)
53 #endif
54
/* Assembly entry point a secondary core runs once it is released. */
extern void mt_secondary_startup(void);
/* IPI-raise hook, registered as the SMP cross-call in (mt_)smp_init_cpus(). */
extern void irq_raise_softirq(const struct cpumask *mask, unsigned int irq);
/* Per-CPU GIC interface init, called on the secondary itself. */
extern void mt_gic_secondary_init(void);


extern unsigned int irq_total_secondary_cpus;
/* Number of secondaries that have not yet completed their first boot;
 * decremented in mt_wakeup_cpu() so that only later hotplug-on events take
 * the BOOT_ADDR-reprogram + MTCMOS power-on path. */
static unsigned int is_secondary_cpu_first_boot;
/* Serialises boot_secondary() on the boot CPU against the secondary's
 * platform_secondary_init(). */
static DEFINE_SPINLOCK(boot_lock);

/* Per-secondary (CPU1..CPU3) mailbox register, magic value and MTCMOS
 * power-control function, all indexed by (cpu - 1). */
static const unsigned int secure_magic_reg[] = {SLAVE1_MAGIC_REG, SLAVE2_MAGIC_REG, SLAVE3_MAGIC_REG};
static const unsigned int secure_magic_num[] = {SLAVE1_MAGIC_NUM, SLAVE2_MAGIC_NUM, SLAVE3_MAGIC_NUM};
typedef int (*spm_mtcmos_ctrl_func)(int state, int chkWfiBeforePdn);
static const spm_mtcmos_ctrl_func secure_ctrl_func[] = {spm_mtcmos_ctrl_cpu1, spm_mtcmos_ctrl_cpu2, spm_mtcmos_ctrl_cpu3};
68
/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen".
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
/* On kernels >= 3.10 pen_release is provided by core ARM code. */
volatile int pen_release = -1;
#endif

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void __cpuinit write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();	/* order the store before the cache maintenance below */
	/* Push the new value past L1 and the outer cache so a core that is
	 * not yet coherent still sees it. */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
89
90
91 int L2CTLR_get_core_count(void){
92
93 //unsigned int cores = 0;
94 //asm volatile(
95 //"MRC p15, 1, %0, c9, c0, 2\n"
96 //: "=r" (cores)
97 //:
98 //: "cc"
99 //);
100 //
101 //cores = cores >> 24;
102 //cores += 1;
103 //return cores;
104
105 unsigned int cores = 0;
106 extern u32 get_devinfo_with_index(u32 index);
107 u32 idx = 3;
108 u32 value = 0;
109 value = get_devinfo_with_index(idx);
110
111 value = (value >> 24) & 0xF;
112 if (value == 0x0)
113 cores = 4;
114 else if (value == 0x8)
115 cores = 3;
116 else
117 cores = 2;
118
119 printk("[CORE] num:%d\n",cores);
120 return cores;
121 }
/*
 * Early C-level init executed ON the secondary CPU right after it enters
 * the kernel: set up its GIC interface, clear pen_release so the boot
 * CPU's wait loop in (mt_)boot_secondary() can complete, notify the
 * watchdog, restore FIQ glue state, then synchronise on boot_lock.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
void __cpuinit platform_secondary_init(unsigned int cpu)
#else
void __cpuinit mt_platform_secondary_init(unsigned int cpu)
#endif
{
	struct wd_api *wd_api = NULL;

	printk(KERN_INFO "Slave cpu init\n");
	HOTPLUG_INFO("platform_secondary_init, cpu: %d\n", cpu);

	/* Per-CPU GIC setup must happen before we announce ourselves. */
	mt_gic_secondary_init();

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/* Tell the watchdog driver this CPU is online again. */
	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_cpu_hot_plug_on_notify(cpu);

	/* Restore FIQ glue state on this core. */
	fiq_glue_resume();

#ifdef SPM_MCDI_FUNC
	spm_hot_plug_in_before(cpu);
#endif

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
157
/*
 * mt_wakeup_cpu - release secondary core "cpu" (1..3) from its wait loop.
 *
 * Writes the per-CPU magic number into the SRAMROM mailbox (mirrored into
 * the non-secure boot-share page when a TEE is configured).  With
 * CONFIG_HOTPLUG_WITH_POWER_CTRL, the very first boot of each secondary
 * needs only the magic write; later hotplug-on events also reprogram
 * BOOT_ADDR with the secondary entry point and switch the core's MTCMOS
 * power domain back on.
 */
static void __cpuinit mt_wakeup_cpu(int cpu)
{
	mt65xx_reg_sync_writel(secure_magic_num[cpu-1], secure_magic_reg[cpu-1]);
#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT) || defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
	/* Mirror the magic into the boot share page for the secure world. */
	*((unsigned int*)NS_SLAVE_MAGIC_REG) = secure_magic_num[cpu-1];
#endif
	HOTPLUG_INFO("SLAVE%d_MAGIC_NUM:%x\n", cpu, secure_magic_num[cpu-1]);
#ifdef CONFIG_HOTPLUG_WITH_POWER_CTRL
	if (is_secondary_cpu_first_boot)
	{
		printk("mt_wakeup_cpu: first boot!(%d)\n", cpu);
		/* Consume one first-boot credit set up in smp_init_cpus(). */
		--is_secondary_cpu_first_boot;
	}
	else
	{
		printk("mt_wakeup_cpu: not first boot!(%d)\n", cpu);
		mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOT_ADDR);

#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT) || defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
		*((unsigned int*)NS_SLAVE_BOOT_ADDR) = virt_to_phys(mt_secondary_startup);
#endif
		/* Power the core's MTCMOS domain back on. */
		(*secure_ctrl_func[cpu-1])(STA_POWER_ON, 1);
	}
#endif
}
183
/*
 * Runs on the boot CPU to bring secondary "cpu" (1..3) online: program
 * the core's boot vector/magic, power it on (or cross-call it in the
 * non-TEE case), then wait up to one second for the secondary to clear
 * pen_release in platform_secondary_init().
 *
 * Returns 0 on success, -ENOSYS if the core never left the holding pen
 * (after dumping debug registers and all-CPU stacks).
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
#else
int __cpuinit mt_boot_secondary(unsigned int cpu, struct task_struct *idle)
#endif
{
	unsigned long timeout;

	printk(KERN_CRIT "Boot slave CPU\n");

	atomic_inc(&hotplug_cpu_count);

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	HOTPLUG_INFO("boot_secondary, cpu: %d\n", cpu);
	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	write_pen_release(cpu);

	switch (cpu)
	{
	case 1:
	case 2:
	case 3:
#if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
		mt_wakeup_cpu(cpu);
#else /* CONFIG_TRUSTONIC_TEE_SUPPORT */
		printk("mt_wakeup_cpu: not first boot!(%d)\n", cpu);
		//mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOT_ADDR);
		// fixme, to replace SMC with parameter of ns_slave_boot_addr
		/* Ask the secure world to set this core's reset vector, then
		 * power its MTCMOS domain on. */
		mt_secure_call(MC_FC_SET_RESET_VECTOR, virt_to_phys(mt_secondary_startup), cpu, 0);
		(*secure_ctrl_func[cpu-1])(STA_POWER_ON, 1);
#endif /* !CONFIG_TRUSTONIC_TEE_SUPPORT */
		break;
	default:
		break;

	}

#if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
	/* Kick the target core with an IPI so it leaves its wait loop. */
	smp_cross_call(cpumask_of(cpu));
#endif /* !CONFIG_TRUSTONIC_TEE_SUPPORT */

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	/* Poll pen_release for up to 1 second. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	if (pen_release == -1)
	{
		return 0;
	}
	else
	{
		/* Boot failure: dump hard-coded CPU debug event/monitor
		 * registers (fixed virtual addresses) plus all stacks. */
#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
		//to fix monitor register
		printk(KERN_EMERG "failed to boot.\n");
		mt65xx_reg_sync_writel( 0x1, 0xF020021c);
		printk(KERN_EMERG "CPU2, debug event: 0x%08x, debug monitor: 0x%08x\n", *(volatile u32 *)(0xF020021c), *(volatile u32 *)(0xF0200014));
		mt65xx_reg_sync_writel( 0x3, 0xF020021c);
		printk(KERN_EMERG "CPU3, debug event: 0x%08x, debug monitor: 0x%08x\n", *(volatile u32 *)(0xF020021c), *(volatile u32 *)(0xF0200014));
		on_each_cpu((smp_call_func_t)dump_stack, NULL, 0);
#else
		mt65xx_reg_sync_writel(cpu + 8, 0xf0200080);
		printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", cpu, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
		on_each_cpu((smp_call_func_t)dump_stack, NULL, 0);
#endif
		/* Undo the online count taken at the top. */
		atomic_dec(&hotplug_cpu_count);
		return -ENOSYS;
	}
}
281
/*
 * Discover how many cores this part exposes (eFuse-backed
 * L2CTLR_get_core_count()), clamp to NR_CPUS, power down the MTCMOS
 * domains of cores that are unavailable, and mark the rest possible.
 * Also registers the IPI cross-call hook.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
void __init smp_init_cpus(void)
#else
void __init mt_smp_init_cpus(void)
#endif
{
	unsigned int i, ncores;

	ncores = L2CTLR_get_core_count();
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "L2CTLR core count (%d) > NR_CPUS (%d)\n", ncores, NR_CPUS);
		printk(KERN_WARNING
		       "set nr_cores to NR_CPUS (%d)\n", NR_CPUS);
		ncores = NR_CPUS;
	}

	/* Cores above ncores are not usable on this part: power them down. */
	if (ncores == 3)
	{
		spm_mtcmos_ctrl_cpu3(STA_POWER_DOWN, 0);
	}
	else if (ncores == 2)
	{
		spm_mtcmos_ctrl_cpu3(STA_POWER_DOWN, 0);
		spm_mtcmos_ctrl_cpu2(STA_POWER_DOWN, 0);
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

#if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
	/* Each secondary gets one "first boot" credit, consumed by
	 * mt_wakeup_cpu() the first time it is released. */
	irq_total_secondary_cpus = num_possible_cpus() - 1;
	is_secondary_cpu_first_boot = num_possible_cpus() - 1;
#endif /* !CONFIG_TRUSTONIC_TEE_SUPPORT */

	/* Raise a softirq IPI as the SMP cross-call mechanism. */
	set_smp_cross_call(irq_raise_softirq);

}
320
321 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
322 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
323 #else
324 void __init mt_platform_smp_prepare_cpus(unsigned int max_cpus)
325 #endif
326 {
327 int i;
328
329 for (i = 0; i < max_cpus; i++)
330 set_cpu_present(i, true);
331
332
333 #if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
334 /* write the address of slave startup into the system-wide flags register */
335 mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), SLAVE_JUMP_REG);
336
337 #ifdef CONFIG_MTK_IN_HOUSE_TEE_SUPPORT
338 *((unsigned int*)NS_SLAVE_JUMP_REG) = virt_to_phys(mt_secondary_startup);
339 #endif
340
341 #else //#if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
342
343 //for (i = 1; i < max_cpus; i++)
344 //(*secure_ctrl_func[i-1])(STA_POWER_DOWN, 1);
345
346 //printk(KERN_CRIT "platform_smp_prepare_cpus !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n");
347
348 //reg_write(BOOTROM_PWR_CTRL, reg_read(BOOTROM_PWR_CTRL) | 0x80000000);
349 //BOOTROM_PWR_CTRL not accessible from NW...
350 //mt_secure_call(MC_FC_BOOTROM_PWR_CTRL, 0, 0, 0);
351 #endif //#if !defined (CONFIG_TRUSTONIC_TEE_SUPPORT)
352 }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
/* Hotplug hooks implemented elsewhere in mach-mt8127. */
extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_die(unsigned int cpu);
extern int platform_cpu_disable(unsigned int cpu);
/* SMP operations table registered by the machine descriptor on >= 3.10
 * kernels; older kernels use the globally-named platform_* hooks above. */
struct smp_operations mt65xx_smp_ops __initdata = {
	.smp_init_cpus = mt_smp_init_cpus,
	.smp_prepare_cpus = mt_platform_smp_prepare_cpus,
	.smp_secondary_init = mt_platform_secondary_init,
	.smp_boot_secondary = mt_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill = platform_cpu_kill,
	.cpu_die = platform_cpu_die,
	.cpu_disable = platform_cpu_disable,
#endif
};
#endif
368 #endif