/* arch/arm/kernel/suspend.c -- GitHub/mt8127/android_kernel_alcatel_ttab.git */
#include <linux/init.h>

#include <asm/cacheflush.h>	/* flush_cache_louis(), __cpuc_flush_dcache_area(), outer_clean_range() */
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
extern void cpu_resume_mmu(void);

/* MIDR values used to identify the CPU core; CPU_TYPEID_MASK clears the Revision field (bits [3:0]). */
#define CA15L_TYPEID	0x410FC0D0
#define CA7_TYPEID	0x410FC070
#define CPU_TYPEID_MASK	0xfffffff0

/* Read the Main ID Register (MIDR) of the calling CPU. */
#define read_midr() \
({ \
	register unsigned int ret; \
	__asm__ __volatile__ ("mrc p15, 0, %0, c0, c0, 0 \n\t" \
			      : "=r" (ret)); \
	ret; \
})

/* Non-zero if the calling CPU's MIDR matches 'type', ignoring the Revision field. */
#define is_cpu_type(type) \
({ \
	(read_midr() & CPU_TYPEID_MASK) == (type); \
})
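
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * helpers above let callers branch on the core type at runtime, e.g.
 *
 *	if (is_cpu_type(CA7_TYPEID))
 *		pr_debug("running on a Cortex-A7 class core, MIDR=%#x\n",
 *			 read_midr());
 */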

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);
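	/*
	 * Consumer-side note (illustrative, based on mainline
	 * arch/arm/kernel/sleep.S): cpu_resume reloads this three-word
	 * header with roughly "ldmia r0!, {r1, sp, pc}", so r1 receives
	 * the physical idmap pgd, sp is restored, and execution jumps to
	 * the physical address of cpu_do_resume. Any change to the
	 * stores above must be mirrored there.
	 */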

	cpu_do_suspend(ptr);

	/*
	 * Optimization for CA17-type ("CA15L") cores: clean and
	 * invalidate the entire L1 data cache with the DCCIALL operation
	 * instead of flush_cache_louis().
	 */
	if (is_cpu_type(CA15L_TYPEID)) {
		__asm__ __volatile__ (
			"mov r0, #0 \n\t"
			"MCR p15, 1, r0, c15, c14, 0 @ DCCIALL L1 \n\t"
			"dsb \n\t"
			"isb \n\t"
			::: "r0");
	} else {
		flush_cache_louis();
	}

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
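
/*
 * Saved-context layout sketch (derived from the code above, for
 * reference only): *save_ptr holds the physical address of ctx, and
 * ctx points at
 *
 *	[0] phys(idmap_pgd)   [1] sp   [2] phys(cpu_do_resume)
 *	[3..] CPU-specific register state written by cpu_do_suspend()
 *
 * all of which must reach main memory, since it is read back with the
 * MMU and caches off.
 */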

/*
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * detail which platform code shouldn't have to know about.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming. On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn);
	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	return ret;
}
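
/*
 * Usage sketch (illustrative only; the names mtk_finisher and
 * MTK_SLEEP_MODE are hypothetical): platform code wraps cpu_suspend()
 * around a "finisher" that powers the CPU down and, on a successful
 * power-down, never returns; cpu_suspend() then returns 0 after resume.
 *
 *	static int mtk_finisher(unsigned long arg)
 *	{
 *		// program the power controller for mode 'arg', then WFI;
 *		// reaching this return means the CPU never lost context,
 *		// and the value is propagated back through cpu_suspend()
 *		cpu_do_idle();
 *		return 1;
 *	}
 *
 *	static int platform_enter_sleep(void)
 *	{
 *		return cpu_suspend(MTK_SLEEP_MODE, mtk_finisher);
 *	}
 */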