ARM: alpine: add support for generic pci
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / arch / arm / mach-exynos / platsmp.c
1 /*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
6 *
7 * Copyright (C) 2002 ARM Ltd.
8 * All Rights Reserved
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/jiffies.h>
20 #include <linux/smp.h>
21 #include <linux/io.h>
22 #include <linux/of_address.h>
23
24 #include <asm/cacheflush.h>
25 #include <asm/cp15.h>
26 #include <asm/smp_plat.h>
27 #include <asm/smp_scu.h>
28 #include <asm/firmware.h>
29
30 #include <mach/map.h>
31
32 #include "common.h"
33 #include "regs-pmu.h"
34
35 extern void exynos4_secondary_startup(void);
36
37 /*
38 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
39 * during hot-(un)plugging CPUx.
40 *
41 * The feature can be cleared safely during first boot of secondary CPU.
42 *
43 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
44 * down a CPU so the CPU idle clock down feature could properly detect global
45 * idle state when CPUx is off.
46 */
47 static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
48 {
49 if (soc_is_exynos4()) {
50 unsigned int tmp;
51
52 tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
53 if (enable)
54 tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
55 else
56 tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
57 pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
58 }
59 }
60
61 #ifdef CONFIG_HOTPLUG_CPU
/*
 * Undo the low-power preparation on the way back from WFI: re-enable the
 * D-cache (SCTLR.C) and set ACTLR bit 6 (0x40, the A9 "SMP" coherency
 * bit), then clear the delayed-reset-assertion option for this core.
 */
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"	/* read SCTLR */
	"	orr	%0, %0, %1\n"		/* set CR_C: D-cache on */
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"	/* read ACTLR */
	"	orr	%0, %0, %2\n"		/* set bit 6: rejoin coherency */
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");

	exynos_set_delayed_reset_assertion(core_id, false);
}
79
/*
 * Park the dying CPU in a power-down/WFI loop until the boot CPU
 * releases it again by writing this core's ID into pen_release.
 * Called from exynos_cpu_die() after the cache flush / coherency exit.
 */
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		/*
		 * Exynos4 SoCs require setting
		 * USE_DELAYED_RESET_ASSERTION so the CPU idle
		 * clock down feature could properly detect
		 * global idle state when CPUx is off.
		 */
		exynos_set_delayed_reset_assertion(core_id, true);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
117 #endif /* CONFIG_HOTPLUG_CPU */
118
119 /**
120 * exynos_core_power_down : power down the specified cpu
121 * @cpu : the cpu to power down
122 *
123 * Power down the specified cpu. The sequence must be finished by a
124 * call to cpu_do_idle()
125 *
126 */
127 void exynos_cpu_power_down(int cpu)
128 {
129 u32 core_conf;
130
131 if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
132 of_machine_is_compatible("samsung,exynos5800"))) {
133 /*
134 * Bypass power down for CPU0 during suspend. Check for
135 * the SYS_PWR_REG value to decide if we are suspending
136 * the system.
137 */
138 int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);
139
140 if (!(val & S5P_CORE_LOCAL_PWR_EN))
141 return;
142 }
143
144 core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
145 core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
146 pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
147 }
148
149 /**
150 * exynos_cpu_power_up : power up the specified cpu
151 * @cpu : the cpu to power up
152 *
153 * Power up the specified cpu
154 */
155 void exynos_cpu_power_up(int cpu)
156 {
157 u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
158
159 if (soc_is_exynos3250())
160 core_conf |= S5P_CORE_AUTOWAKEUP_EN;
161
162 pmu_raw_writel(core_conf,
163 EXYNOS_ARM_CORE_CONFIGURATION(cpu));
164 }
165
166 /**
167 * exynos_cpu_power_state : returns the power state of the cpu
168 * @cpu : the cpu to retrieve the power state from
169 *
170 */
171 int exynos_cpu_power_state(int cpu)
172 {
173 return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
174 S5P_CORE_LOCAL_PWR_EN);
175 }
176
/**
 * exynos_cluster_power_down - power down the specified cluster
 * @cluster: the cluster to power down
 *
 * Writing 0 clears the local-power-enable bit and shuts the cluster off.
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}
185
186 /**
187 * exynos_cluster_power_up : power up the specified cluster
188 * @cluster : the cluster to power up
189 */
190 void exynos_cluster_power_up(int cluster)
191 {
192 pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
193 EXYNOS_COMMON_CONFIGURATION(cluster));
194 }
195
196 /**
197 * exynos_cluster_power_state : returns the power state of the cluster
198 * @cluster : the cluster to retrieve the power state from
199 *
200 */
201 int exynos_cluster_power_state(int cluster)
202 {
203 return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
204 S5P_CORE_LOCAL_PWR_EN);
205 }
206
207 void __iomem *cpu_boot_reg_base(void)
208 {
209 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
210 return pmu_base_addr + S5P_INFORM5;
211 return sysram_base_addr;
212 }
213
214 static inline void __iomem *cpu_boot_reg(int cpu)
215 {
216 void __iomem *boot_reg;
217
218 boot_reg = cpu_boot_reg_base();
219 if (!boot_reg)
220 return ERR_PTR(-ENODEV);
221 if (soc_is_exynos4412())
222 boot_reg += 4*cpu;
223 else if (soc_is_exynos5420() || soc_is_exynos5800())
224 boot_reg += 4;
225 return boot_reg;
226 }
227
/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
static void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	/* Spin until firmware flags readiness via SPARE2, then settle. */
	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

	/*
	 * NOTE(review): the wakeup-from-local-power-cfg bit is read-modify-
	 * written into the core STATUS register here — confirm that STATUS
	 * (and not the core OPTION register) is the intended target.
	 */
	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pr_info("CPU%u: Software reset\n", core_id);
	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}
251
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();			/* order the store before the cache push */
	sync_cache_w(&pen_release);	/* make it visible to non-coherent CPUs */
}
263
264 static void __iomem *scu_base_addr(void)
265 {
266 return (void __iomem *)(S5P_VA_SCU);
267 }
268
/* Serialises the boot CPU and a starting secondary during bring-up. */
static DEFINE_SPINLOCK(boot_lock);
270
/* Runs on the freshly started secondary CPU, early in secondary_start_kernel(). */
static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
285
286 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
287 {
288 unsigned long timeout;
289 u32 mpidr = cpu_logical_map(cpu);
290 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
291 int ret = -ENOSYS;
292
293 /*
294 * Set synchronisation state between this boot processor
295 * and the secondary one
296 */
297 spin_lock(&boot_lock);
298
299 /*
300 * The secondary processor is waiting to be released from
301 * the holding pen - release it, then wait for it to flag
302 * that it has been released by resetting pen_release.
303 *
304 * Note that "pen_release" is the hardware CPU core ID, whereas
305 * "cpu" is Linux's internal ID.
306 */
307 write_pen_release(core_id);
308
309 if (!exynos_cpu_power_state(core_id)) {
310 exynos_cpu_power_up(core_id);
311 timeout = 10;
312
313 /* wait max 10 ms until cpu1 is on */
314 while (exynos_cpu_power_state(core_id)
315 != S5P_CORE_LOCAL_PWR_EN) {
316 if (timeout-- == 0)
317 break;
318
319 mdelay(1);
320 }
321
322 if (timeout == 0) {
323 printk(KERN_ERR "cpu1 power enable failed");
324 spin_unlock(&boot_lock);
325 return -ETIMEDOUT;
326 }
327 }
328
329 exynos_core_restart(core_id);
330
331 /*
332 * Send the secondary CPU a soft interrupt, thereby causing
333 * the boot monitor to read the system wide flags register,
334 * and branch to the address found there.
335 */
336
337 timeout = jiffies + (1 * HZ);
338 while (time_before(jiffies, timeout)) {
339 unsigned long boot_addr;
340
341 smp_rmb();
342
343 boot_addr = virt_to_phys(exynos4_secondary_startup);
344
345 /*
346 * Try to set boot address using firmware first
347 * and fall back to boot register if it fails.
348 */
349 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
350 if (ret && ret != -ENOSYS)
351 goto fail;
352 if (ret == -ENOSYS) {
353 void __iomem *boot_reg = cpu_boot_reg(core_id);
354
355 if (IS_ERR(boot_reg)) {
356 ret = PTR_ERR(boot_reg);
357 goto fail;
358 }
359 __raw_writel(boot_addr, boot_reg);
360 }
361
362 call_firmware_op(cpu_boot, core_id);
363
364 if (soc_is_exynos3250())
365 dsb_sev();
366 else
367 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
368
369 if (pen_release == -1)
370 break;
371
372 udelay(10);
373 }
374
375 /* No harm if this is called during first boot of secondary CPU */
376 exynos_set_delayed_reset_assertion(core_id, false);
377
378 /*
379 * now the secondary core is starting up let it run its
380 * calibrations, then wait for it to finish
381 */
382 fail:
383 spin_unlock(&boot_lock);
384
385 return pen_release != -1 ? ret : 0;
386 }
387
388 /*
389 * Initialise the CPU possible map early - this describes the CPUs
390 * which may be present or become present in the system.
391 */
392
393 static void __init exynos_smp_init_cpus(void)
394 {
395 void __iomem *scu_base = scu_base_addr();
396 unsigned int i, ncores;
397
398 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
399 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
400 else
401 /*
402 * CPU Nodes are passed thru DT and set_cpu_possible
403 * is set by "arm_dt_init_cpu_maps".
404 */
405 return;
406
407 /* sanity check */
408 if (ncores > nr_cpu_ids) {
409 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
410 ncores, nr_cpu_ids);
411 ncores = nr_cpu_ids;
412 }
413
414 for (i = 0; i < ncores; i++)
415 set_cpu_possible(i, true);
416 }
417
418 static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
419 {
420 int i;
421
422 exynos_sysram_init();
423
424 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
425 scu_enable(scu_base_addr());
426
427 /*
428 * Write the address of secondary startup into the
429 * system-wide flags register. The boot monitor waits
430 * until it receives a soft interrupt, and then the
431 * secondary CPU branches to this address.
432 *
433 * Try using firmware operation first and fall back to
434 * boot register if it fails.
435 */
436 for (i = 1; i < max_cpus; ++i) {
437 unsigned long boot_addr;
438 u32 mpidr;
439 u32 core_id;
440 int ret;
441
442 mpidr = cpu_logical_map(i);
443 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
444 boot_addr = virt_to_phys(exynos4_secondary_startup);
445
446 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
447 if (ret && ret != -ENOSYS)
448 break;
449 if (ret == -ENOSYS) {
450 void __iomem *boot_reg = cpu_boot_reg(core_id);
451
452 if (IS_ERR(boot_reg))
453 break;
454 __raw_writel(boot_addr, boot_reg);
455 }
456 }
457 }
458
459 #ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	/* Flush dcache to the LoUIS and take this CPU out of coherency. */
	v7_exit_coherency_flush(louis);

	/* Loops in power-down/WFI until the boot CPU releases us. */
	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
484 #endif /* CONFIG_HOTPLUG_CPU */
485
/* Exynos callbacks wired into the ARM SMP core (struct smp_operations). */
struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};