1 #include <linux/init.h>
4 #include <asm/pgalloc.h>
5 #include <asm/pgtable.h>
6 #include <asm/memory.h>
7 #include <asm/suspend.h>
8 #include <asm/tlbflush.h>
10 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
11 extern void cpu_resume_mmu(void);
/*
 * CPU identification constants, compared against the CP15 Main ID
 * Register (MIDR).  Implementer 0x41 is ARM; part 0xC0D is marketed as
 * Cortex-A17 (referred to here as "CA15L" — see the CA17 CCIALL fast
 * path below), part 0xC07 is Cortex-A7.  CPU_TYPEID_MASK clears the
 * low revision nibble so any silicon revision matches.
 */
#define CA15L_TYPEID 0x410FC0D0
#define CA7_TYPEID 0x410FC070
#define CPU_TYPEID_MASK 0xfffffff0
/*
 * read_midr() - read the Main ID Register (MIDR, CP15 c0) of the
 * calling CPU.
 */
#define read_midr() \
({ \
	register unsigned int ret; \
	__asm__ __volatile__ ("mrc p15, 0, %0, c0, c0, 0 \n\t" \
			      : "=r" (ret)); \
	ret; \
})

/*
 * is_cpu_type(type) - non-zero when the running CPU's MIDR, with the
 * revision nibble masked off, equals @type.  The argument is
 * parenthesized so expression arguments expand safely.
 */
#define is_cpu_type(type) \
({ \
	((read_midr() & CPU_TYPEID_MASK) == (type)) ? 1 : 0; \
})
32 * This is called by __cpu_suspend() to save the state, and do whatever
33 * flushing is required to ensure that when the CPU goes to sleep we have
34 * the necessary data available when the caches are not searched.
36 void __cpu_suspend_save(u32
*ptr
, u32 ptrsz
, u32 sp
, u32
*save_ptr
)
40 *save_ptr
= virt_to_phys(ptr
);
42 /* This must correspond to the LDM in cpu_resume() assembly */
43 *ptr
++ = virt_to_phys(idmap_pgd
);
45 *ptr
++ = virt_to_phys(cpu_do_resume
);
49 /** optimization with CA17 CCIALL. **/
50 if (is_cpu_type(CA15L_TYPEID
)) {
51 __asm__
__volatile__ (
53 "MCR p15, 1, r0, c15, c14, 0 @; DCCIALL L1 \n\t"
63 * flush_cache_louis does not guarantee that
64 * save_ptr and ptr are cleaned to main memory,
65 * just up to the Level of Unification Inner Shareable.
66 * Since the context pointer and context itself
67 * are to be retrieved with the MMU off that
68 * data must be cleaned from all cache levels
69 * to main memory using "area" cache primitives.
71 __cpuc_flush_dcache_area(ctx
, ptrsz
);
72 __cpuc_flush_dcache_area(save_ptr
, sizeof(*save_ptr
));
74 outer_clean_range(*save_ptr
, *save_ptr
+ ptrsz
);
75 outer_clean_range(virt_to_phys(save_ptr
),
76 virt_to_phys(save_ptr
) + sizeof(*save_ptr
));
/*
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * detail which platform code shouldn't have to know about.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
	/*
	 * Remember the address space that was live before suspending;
	 * it is re-activated by hand after __cpu_suspend() returns.
	 */
	struct mm_struct *mm = current->active_mm;
	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming. On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	/*
	 * NOTE(review): the declaration of 'ret', the opening brace, and
	 * the function tail (return path) fall outside this excerpt —
	 * confirm against the full file.
	 */
	ret = __cpu_suspend(arg, fn);
	/* Back from resume: reinstall the caller's page tables ... */
	cpu_switch_mm(mm->pgd, mm);
	/* ... then drop stale branch-predictor and TLB state on this CPU. */
	local_flush_bp_all();
	local_flush_tlb_all();