/*
 * Merge tag 'v3.10.55' into update
 * arch/arm/mach-mt8127/mt_dormant.c
 */
1 /*********************************
2 * include
3 **********************************/
4 #include <linux/init.h>
5 #include <linux/module.h>
6 #include <linux/kernel.h>
7
8 #include <asm/system.h>
9 #include <mach/mt_reg_base.h>
10 #include <mach/mt_dormant.h>
11 #include <mach/mt_spm.h>
12 #include <mach/mt_irq.h>
13 #include <mach/sync_write.h>
14 #include <mach/mt_spm_mtcmos.h>
15
16 #if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
17 #include <mach/mt_secure_api.h>
18 #endif
19
20 #if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
21 #include <mach/mtk_boot_share_page.h>
22 #include <trustzone/kree/tz_pm.h>
23 #endif
24
25 /*********************************
26 * macro
27 **********************************/
28 #define BOOTROM_PWR_CTRL (INFRACFG_AO_BASE + 0x804)
29 #define BOOTROM_BOOT_ADDR (INFRACFG_AO_BASE + 0x800)
30 #define NS_SLAVE_BOOT_ADDR (BOOT_SHARE_BASE + 1012)
31
32 #define CA7_CACHE_CONFIG (MCUSYS_CFGREG_BASE + 0x0000)
33
34 #define reg_read(addr) (*(volatile u32 *)(addr))
35 #define reg_write(addr, val) mt65xx_reg_sync_writel(val, addr)
36
37 /* Timer Bits */
38 #define HYP_TIMER_MULT 0xa /* 12Mhz * 10 i.e. interrupt every 10ms. Linux uses 12MHz * 10 */
39 #define LCL_TIMER_FREQ 0x7f /* Every 128th timer acts as a trigger */
40 #define HYP_TIMER_IRQ 0x1a
41 #define LCL_TIMER_IRQ 0x1e
42 #define TIMER_ENABLE 0x1
43 #define TIMER_DISABLE 0x0
44 #define TIMER_MASK_IRQ 0x2
45 #define TIMER_IRQ_STAT 0x4
46
47 /* PMU States. */
48 #define PMU_STATE0 0
49 #define PMU_STATE1 1
50 #define PMU_STATE2 2
51
52 #define MAX_CLUSTERS 2
53 #define MAX_CORES 8
54 #define MAX_CPUIFS 8
55 #define STACK_SIZE 96
56
57 #define REGS 32
58 #define PMCR_IDX 0
59 #define PMSELR_IDX 1
60 #define PMCNTENSET_IDX 2
61 #define PMCNTENCLR_IDX 3
62 #define PMCCNTR_IDX 4
63 #define PMOVSR_IDX 5
64 #define PMINTENSET_IDX 6
65 #define PMINTENCLR_IDX 7
66 #define PMXEVTYPE0_IDX 8
67 #define PMXEVCNT0_IDX 9
68 #define PMXEVTYPE1_IDX 10
69 #define PMXEVCNT1_IDX 11
70 #define PMXEVTYPE2_IDX 12
71 #define PMXEVCNT2_IDX 13
72 #define PMXEVTYPE3_IDX 14
73 #define PMXEVCNT3_IDX 15
74 unsigned int clusters_ctx[MAX_CLUSTERS][MAX_CORES][REGS];
75 unsigned int migration_ctx[MAX_CORES][REGS];
76 /*
77 * Defines for PMU states
78 */
79 static int pmu_mode = PMU_STATE0;
80
81 #define DIDR_VERSION_SHIFT 16
82 #define DIDR_VERSION_MASK 0xF
83 #define DIDR_VERSION_7_1 5
84 #define DIDR_BP_SHIFT 24
85 #define DIDR_BP_MASK 0xF
86 #define DIDR_WP_SHIFT 28
87 #define DIDR_WP_MASK 0xF
88 #define CLAIMCLR_CLEAR_ALL 0xff
89
90 #define DRAR_VALID_MASK 0x00000003
91 #define DSAR_VALID_MASK 0x00000003
92 #define DRAR_ADDRESS_MASK 0xFFFFF000
93 #define DSAR_ADDRESS_MASK 0xFFFFF000
94 #define OSLSR_OSLM_MASK 0x00000009
95 #define OSLAR_UNLOCKED 0x00000000
96 #define OSLAR_LOCKED 0xC5ACCE55
97 #define LAR_UNLOCKED 0xC5ACCE55
98 #define LAR_LOCKED 0x00000000
99 #define OSDLR_UNLOCKED 0x00000000
100 #define OSDLR_LOCKED 0x00000001
101
102 #define DBGREG_BP_VAL 0x0
103 #define DBGREG_WP_VAL 0x1
104 #define DBGREG_BP_CTRL 0x2
105 #define DBGREG_WP_CTRL 0x3
106 #define DBGREG_BP_XVAL 0x4
107
108 /* MCU_BIU Register */
109 #define MCU_BIU_CONTROL (MCU_BIU_BASE)
110
111 /*********************************
112 * macro for log
113 **********************************/
114 #define CPU_DORMANT_LOG_NONE 0
115 #define CPU_DORMANT_LOG_WITH_XLOG 1
116 #define CPU_DORMANT_LOG_WITH_PRINTK 2
117
118 #define CPU_DORMANT_LOG_PRINT CPU_DORMANT_LOG_WITH_PRINTK
119
120 #if (CPU_DORMANT_LOG_PRINT == CPU_DORMANT_LOG_NONE)
121 #define CPU_DORMANT_INFO(fmt, args...)
122 #elif (CPU_DORMANT_LOG_PRINT == CPU_DORMANT_LOG_WITH_XLOG)
123 #define CPU_DORMANT_INFO(fmt, args...) xlog_printk(ANDROID_LOG_INFO, "Power/cpu_dormant", fmt, ##args)
124 #elif (CPU_DORMANT_LOG_PRINT == CPU_DORMANT_LOG_WITH_PRINTK)
125 #define CPU_DORMANT_INFO(fmt, args...) printk("[Power/cpu_dormant] "fmt, ##args)
126 #endif
127
128 /*********************************
129 * struct
130 **********************************/
131 typedef struct {
132 unsigned vcr;
133 unsigned eacr;
134 unsigned claim;
135 unsigned claimclr;
136 unsigned dtrrx_e;
137 unsigned dtrtx_e;
138 unsigned dscr_e;
139 unsigned wfar;
140 unsigned bvr[16];
141 unsigned bcr[16];
142 unsigned wvr[16];
143 unsigned wcr[16];
144 unsigned bxvr[16];
145 } debug_context_t; /* total size 88 * 4 = 352 bytes */
146
147 typedef struct ns_gic_cpu_context {
148 unsigned int gic_cpu_if_regs[32]; /* GIC context local to the CPU */
149 unsigned int gic_dist_if_pvt_regs[32]; /* GIC SGI/PPI context local to the CPU */
150 } gic_cpu_context;
151
152 typedef struct fault_regs {
153 unsigned dfar;
154 unsigned ifar;
155 unsigned ifsr;
156 unsigned dfsr;
157 unsigned adfsr;
158 unsigned aifsr;
159 } cp15_fault_regs;
160
161 typedef struct ns_banked_cp15_context {
162 unsigned int cp15_misc_regs[2]; /* cp15 miscellaneous registers */
163 unsigned int cp15_ctrl_regs[20]; /* cp15 control registers */
164 unsigned int cp15_mmu_regs[16]; /* cp15 mmu registers */
165 cp15_fault_regs ns_cp15_fault_regs; /* cp15 fault status registers */
166 } banked_cp15_context;
167
168 typedef struct gen_tmr_ctx {
169 unsigned cntfrq;
170 unsigned long long cntvoff;
171 unsigned cnthctl;
172 unsigned cntkctl;
173 unsigned long long cntp_cval;
174 unsigned cntp_tval;
175 unsigned cntp_ctl;
176 unsigned long long cntv_cval;
177 unsigned cntv_tval;
178 unsigned cntv_ctl;
179 unsigned long long cnthp_cval;
180 unsigned cnthp_tval;
181 unsigned cnthp_ctl;
182 } generic_timer_context;
183
184 typedef struct ns_cpu_context {
185 unsigned int banked_cpu_regs[32]; /* Banked cpu registers */
186 banked_cp15_context banked_cp15_regs; /* Per cpu banked cp15 context */
187 generic_timer_context cp15_timer_ctx; /* Global counter registers if accessible in NS world */
188 gic_cpu_context gic_cpu_ctx; /* Per cpu GIC distributor and interface context */
189 unsigned int endianess; /* Per cpu endianess */
190 unsigned int vfp_regs[34]; /* Dummy entry for VFP context. */
191 //debug_context_t debug_ctx; /* Entry for Debug context. */
192 unsigned int dbg_data[32]; /* Entry for Debug context. */
193 } cpu_context;
194
195 typedef struct ns_global_context {
196 unsigned int gic_dist_if_regs[512]; /* GIC distributor context to be saved by the last cpu. */
197 unsigned int generic_timer_regs[8]; /* Global timers if the NS world has access to them */
198 } global_context;
199
/*
 * Structure to preserve the OS mmu and stack state for switch from OS to
 * Switcher context handler.
 */
204 typedef struct os_state {
205 unsigned sctlr;
206 unsigned dacr;
207 unsigned ttbr0;
208 unsigned nmrr;
209 unsigned prrr;
210 } os_state;
211
212 /*
213 * Top level structure to hold the complete context of a core in a cluster in
214 * a multi-cluster system
215 */
216 typedef struct core_context {
217 /*
218 * Non-secure context save area
219 */
220 cpu_context ns_cpu_ctx;
221
222 } core_context;
223
224 /*
225 * Top level structure to hold the complete context of a cluster in a multi-
226 * cluster system
227 */
228 typedef struct cluster_context {
229 core_context core[MAX_CORES];
230 unsigned num_cores;
231 global_context ns_cluster_ctx;
232 } cluster_context;
233
234 /*
235 * Top level structure to hold the complete context of a multi cluster system
236 */
237 typedef struct system_context {
238 cluster_context cluster;
239 unsigned num_clusters;
240 unsigned warm_reset;
241 } system_context;
242
243 typedef volatile struct { /* Registers Save? */
244 unsigned const didr; /* 0 Read only */
245 unsigned dscr_i; /* 1 ignore - use dscr_e instead */
246 unsigned const dummy1[3]; /* 2-4 ignore */
247 unsigned dtrrx_dtrtx_i; /* 5 ignore */
248 unsigned wfar; /* 6 ignore - transient information */
249 unsigned vcr; /* 7 Save */
250 unsigned const dummy2; /* 8 ignore */
251 unsigned ecr; /* 9 ignore */
252 unsigned dsccr; /* 10 ignore */
253 unsigned dsmcr; /* 11 ignore */
254 unsigned const dummy3[20]; /* 12-31 ignore */
255 unsigned dtrrx_e; /* 32 ignore */
256 unsigned itr_pcsr; /* 33 ignore */
257 unsigned dscr_e; /* 34 Save */
258 unsigned dtrtx_e; /* 35 ignore */
259 unsigned drcr; /* 36 ignore */
260 unsigned eacr; /* 37 Save - V7.1 only */
261 unsigned const dummy4[2]; /* 38-39 ignore */
262 unsigned pcsr; /* 40 ignore */
263 unsigned cidsr; /* 41 ignore */
264 unsigned vidsr; /* 42 ignore */
265 unsigned const dummy5[21]; /* 43-63 ignore */
266 unsigned bvr[16]; /* 64-79 Save */
267 unsigned bcr[16]; /* 80-95 Save */
268 unsigned wvr[16]; /* 96-111 Save */
269 unsigned wcr[16]; /* 112-127 Save */
270 unsigned const dummy6[16]; /* 128-143 ignore */
271 unsigned bxvr[16]; /* 144-159 Save if have Virtualization extensions */
272 unsigned const dummy7[32]; /* 160-191 ignore */
273 unsigned oslar; /* 192 If oslsr[0] is 1, unlock before save/restore */
274 unsigned const oslsr; /* 193 ignore */
275 unsigned ossrr; /* 194 ignore */
276 unsigned const dummy8; /* 195 ignore */
277 unsigned prcr; /* 196 ignore */
278 unsigned prsr; /* 197 clear SPD on restore */
279 unsigned const dummy9[762]; /* 198-959 ignore */
280 unsigned itctrl; /* 960 ignore */
281 unsigned const dummy10[39]; /* 961-999 ignore */
282 unsigned claimset; /* 1000 Restore claim bits to here */
283 unsigned claimclr; /* 1001 Save claim bits from here */
284 unsigned const dummy11[2]; /* 1002-1003 ignore */
285 unsigned lar; /* 1004 Unlock before restore */
286 unsigned const lsr; /* 1005 ignore */
287 unsigned const authstatus; /* 1006 Read only */
288 unsigned const dummy12; /* 1007 ignore */
289 unsigned const devid2; /* 1008 Read only */
290 unsigned const devid1; /* 1009 Read only */
291 unsigned const devid; /* 1010 Read only */
292 unsigned const devtype; /* 1011 Read only */
293 unsigned const pid[8]; /* 1012-1019 Read only */
294 unsigned const cid[4]; /* 1020-1023 Read only */
295 } debug_registers_t;
296
297 typedef struct {
298 unsigned (*read) (void);
299 void (*write) (unsigned);
300 } rw_ops;
301
302 typedef struct {
303 rw_ops bvr;
304 rw_ops bcr;
305 rw_ops wvr;
306 rw_ops wcr;
307 rw_ops bxvr;
308 } dbgreg_rw_ops;
309
310
311
312 struct set_and_clear_regs
313 {
314 volatile unsigned int set[32], clear[32];
315 };
316
317 typedef struct
318 {
319 volatile unsigned int control; /* 0x000 */
320 const unsigned int controller_type;
321 const unsigned int implementer;
322 const char padding1[116];
323 volatile unsigned int security[32]; /* 0x080 */
324 struct set_and_clear_regs enable; /* 0x100 */
325 struct set_and_clear_regs pending; /* 0x200 */
326 struct set_and_clear_regs active; /* 0x300 */
327 volatile unsigned int priority[256]; /* 0x400 */
328 volatile unsigned int target[256]; /* 0x800 */
329 volatile unsigned int configuration[64]; /* 0xC00 */
330 const char padding3[512]; /* 0xD00 */
331 volatile unsigned int software_interrupt; /* 0xF00 */
332 volatile unsigned int sgi_clr_pending[4]; /* 0xF10 */
333 volatile unsigned int sgi_set_pending[4]; /* 0xF20 */
334 const char padding4[176];
335 unsigned const int peripheral_id[4]; /* 0xFE0 */
336 unsigned const int primecell_id[4]; /* 0xFF0 */
337 } interrupt_distributor;
338
339
340
341 typedef struct
342 {
343 volatile unsigned int control; /* 0x00 */
344 volatile unsigned int priority_mask; /* 0x04 */
345 volatile unsigned int binary_point; /* 0x08 */
346 volatile unsigned const int interrupt_ack; /* 0x0c */
347 volatile unsigned int end_of_interrupt; /* 0x10 */
348 volatile unsigned const int running_priority; /* 0x14 */
349 volatile unsigned const int highest_pending; /* 0x18 */
350 volatile unsigned int aliased_binary_point; /* 0x1c */
351 volatile unsigned const int aliased_interrupt_ack; /* 0x20 */
352 volatile unsigned int alias_end_of_interrupt; /* 0x24 */
353 volatile unsigned const int alias_highest_pending; /* 0x28 */
354 } cpu_interface;
355
356 typedef struct
357 {
358 unsigned int mcu_biu_control; /* MCU_BIU control register */
359 } mcu_biu_reg;
360
361 /*********************************
362 * extern
363 **********************************/
364 extern unsigned *copy_words(volatile unsigned *destination, volatile unsigned *source, unsigned num_words);
365
366 extern void cpu_wake_up(void);
367
368 extern void save_control_registers(unsigned *pointer, int is_secure);
369 extern void save_mmu(unsigned *pointer);
370 extern void save_mpu(unsigned *pointer);
371 extern void save_performance_monitors(unsigned *pointer);
372 extern void save_banked_registers(unsigned *pointer);
373 extern void save_cp15(unsigned *pointer);
374 extern void save_vfp(unsigned *pointer);
375 extern void save_generic_timer(unsigned *pointer, int is_hyp);
376 //extern void save_v7_debug(unsigned *pointer);
377 extern void save_fault_status(unsigned *pointer);
378
379 extern void restore_control_registers(unsigned *pointer, int is_secure);
380 extern void mt_restore_control_registers(unsigned *pointer, int is_secure);
381 extern void restore_mmu(unsigned *pointer);
382 extern void restore_mpu(unsigned *pointer);
383 extern void restore_performance_monitors(unsigned *pointer);
384 extern void restore_banked_registers(unsigned *pointer);
385 extern void restore_cp15(unsigned *pointer);
386 extern void restore_vfp(unsigned *pointer);
387 extern void restore_generic_timer(unsigned *pointer, int is_hyp);
388 //extern void restore_v7_debug(unsigned *pointer);
389 extern void restore_fault_status(unsigned *pointer);
390
391 extern void write_cntp_ctl(unsigned);
392 extern void write_cntv_ctl(unsigned);
393 extern unsigned read_cpuid(void);
394 extern unsigned read_clusterid(void);
395 extern unsigned read_nsacr(void);
396 extern unsigned read_id_pfr1(void);
397
398 extern unsigned read_dbg_osdlr(void);
399 extern unsigned read_dbg_drar(void);
400 extern unsigned read_dbg_dsar(void);
401 extern unsigned read_dbg_devid(void);
402 extern unsigned read_dbg_didr(void);
403 extern unsigned read_dbg_dtrrxext(void);
404 extern unsigned read_dbg_dtrtxext(void);
405 extern unsigned read_dbg_dscrext(void);
406 extern unsigned read_dbg_wfar(void);
407 extern unsigned read_dbg_vcr(void);
408 extern unsigned read_dbg_claimclr(void);
409 extern unsigned read_dbg_bvr0(void);
410 extern unsigned read_dbg_bvr1(void);
411 extern unsigned read_dbg_bvr2(void);
412 extern unsigned read_dbg_bvr3(void);
413 extern unsigned read_dbg_bvr4(void);
414 extern unsigned read_dbg_bvr5(void);
415 extern unsigned read_dbg_bvr6(void);
416 extern unsigned read_dbg_bvr7(void);
417 extern unsigned read_dbg_bvr8(void);
418 extern unsigned read_dbg_bvr9(void);
419 extern unsigned read_dbg_bvr10(void);
420 extern unsigned read_dbg_bvr11(void);
421 extern unsigned read_dbg_bvr12(void);
422 extern unsigned read_dbg_bvr13(void);
423 extern unsigned read_dbg_bvr14(void);
424 extern unsigned read_dbg_bvr15(void);
425 extern unsigned read_dbg_bcr0(void);
426 extern unsigned read_dbg_bcr1(void);
427 extern unsigned read_dbg_bcr2(void);
428 extern unsigned read_dbg_bcr3(void);
429 extern unsigned read_dbg_bcr4(void);
430 extern unsigned read_dbg_bcr5(void);
431 extern unsigned read_dbg_bcr6(void);
432 extern unsigned read_dbg_bcr7(void);
433 extern unsigned read_dbg_bcr8(void);
434 extern unsigned read_dbg_bcr9(void);
435 extern unsigned read_dbg_bcr10(void);
436 extern unsigned read_dbg_bcr11(void);
437 extern unsigned read_dbg_bcr12(void);
438 extern unsigned read_dbg_bcr13(void);
439 extern unsigned read_dbg_bcr14(void);
440 extern unsigned read_dbg_bcr15(void);
441 extern unsigned read_dbg_wvr0(void);
442 extern unsigned read_dbg_wvr1(void);
443 extern unsigned read_dbg_wvr2(void);
444 extern unsigned read_dbg_wvr3(void);
445 extern unsigned read_dbg_wvr4(void);
446 extern unsigned read_dbg_wvr5(void);
447 extern unsigned read_dbg_wvr6(void);
448 extern unsigned read_dbg_wvr7(void);
449 extern unsigned read_dbg_wvr8(void);
450 extern unsigned read_dbg_wvr9(void);
451 extern unsigned read_dbg_wvr10(void);
452 extern unsigned read_dbg_wvr11(void);
453 extern unsigned read_dbg_wvr12(void);
454 extern unsigned read_dbg_wvr13(void);
455 extern unsigned read_dbg_wvr14(void);
456 extern unsigned read_dbg_wvr15(void);
457 extern unsigned read_dbg_wcr0(void);
458 extern unsigned read_dbg_wcr1(void);
459 extern unsigned read_dbg_wcr2(void);
460 extern unsigned read_dbg_wcr3(void);
461 extern unsigned read_dbg_wcr4(void);
462 extern unsigned read_dbg_wcr5(void);
463 extern unsigned read_dbg_wcr6(void);
464 extern unsigned read_dbg_wcr7(void);
465 extern unsigned read_dbg_wcr8(void);
466 extern unsigned read_dbg_wcr9(void);
467 extern unsigned read_dbg_wcr10(void);
468 extern unsigned read_dbg_wcr11(void);
469 extern unsigned read_dbg_wcr12(void);
470 extern unsigned read_dbg_wcr13(void);
471 extern unsigned read_dbg_wcr14(void);
472 extern unsigned read_dbg_wcr15(void);
473 extern unsigned read_dbg_bxvr0(void);
474 extern unsigned read_dbg_bxvr1(void);
475 extern unsigned read_dbg_bxvr2(void);
476 extern unsigned read_dbg_bxvr3(void);
477 extern unsigned read_dbg_bxvr4(void);
478 extern unsigned read_dbg_bxvr5(void);
479 extern unsigned read_dbg_bxvr6(void);
480 extern unsigned read_dbg_bxvr7(void);
481 extern unsigned read_dbg_bxvr8(void);
482 extern unsigned read_dbg_bxvr9(void);
483 extern unsigned read_dbg_bxvr10(void);
484 extern unsigned read_dbg_bxvr11(void);
485 extern unsigned read_dbg_bxvr12(void);
486 extern unsigned read_dbg_bxvr13(void);
487 extern unsigned read_dbg_bxvr14(void);
488 extern unsigned read_dbg_bxvr15(void);
489
490 extern void write_dbg_osdlr(unsigned);
491 extern void write_dbg_oslar(unsigned);
492 extern void write_dbg_dtrrxext(unsigned);
493 extern void write_dbg_dtrtxext(unsigned);
494 extern void write_dbg_dscrext(unsigned);
495 extern void write_dbg_wfar(unsigned);
496 extern void write_dbg_vcr(unsigned);
497 extern void write_dbg_claimset(unsigned);
498 extern void write_dbg_bvr0(unsigned);
499 extern void write_dbg_bvr1(unsigned);
500 extern void write_dbg_bvr2(unsigned);
501 extern void write_dbg_bvr3(unsigned);
502 extern void write_dbg_bvr4(unsigned);
503 extern void write_dbg_bvr5(unsigned);
504 extern void write_dbg_bvr6(unsigned);
505 extern void write_dbg_bvr7(unsigned);
506 extern void write_dbg_bvr8(unsigned);
507 extern void write_dbg_bvr9(unsigned);
508 extern void write_dbg_bvr10(unsigned);
509 extern void write_dbg_bvr11(unsigned);
510 extern void write_dbg_bvr12(unsigned);
511 extern void write_dbg_bvr13(unsigned);
512 extern void write_dbg_bvr14(unsigned);
513 extern void write_dbg_bvr15(unsigned);
514 extern void write_dbg_bcr0(unsigned);
515 extern void write_dbg_bcr1(unsigned);
516 extern void write_dbg_bcr2(unsigned);
517 extern void write_dbg_bcr3(unsigned);
518 extern void write_dbg_bcr4(unsigned);
519 extern void write_dbg_bcr5(unsigned);
520 extern void write_dbg_bcr6(unsigned);
521 extern void write_dbg_bcr7(unsigned);
522 extern void write_dbg_bcr8(unsigned);
523 extern void write_dbg_bcr9(unsigned);
524 extern void write_dbg_bcr10(unsigned);
525 extern void write_dbg_bcr11(unsigned);
526 extern void write_dbg_bcr12(unsigned);
527 extern void write_dbg_bcr13(unsigned);
528 extern void write_dbg_bcr14(unsigned);
529 extern void write_dbg_bcr15(unsigned);
530 extern void write_dbg_wvr0(unsigned);
531 extern void write_dbg_wvr1(unsigned);
532 extern void write_dbg_wvr2(unsigned);
533 extern void write_dbg_wvr3(unsigned);
534 extern void write_dbg_wvr4(unsigned);
535 extern void write_dbg_wvr5(unsigned);
536 extern void write_dbg_wvr6(unsigned);
537 extern void write_dbg_wvr7(unsigned);
538 extern void write_dbg_wvr8(unsigned);
539 extern void write_dbg_wvr9(unsigned);
540 extern void write_dbg_wvr10(unsigned);
541 extern void write_dbg_wvr11(unsigned);
542 extern void write_dbg_wvr12(unsigned);
543 extern void write_dbg_wvr13(unsigned);
544 extern void write_dbg_wvr14(unsigned);
545 extern void write_dbg_wvr15(unsigned);
546 extern void write_dbg_wcr0(unsigned);
547 extern void write_dbg_wcr1(unsigned);
548 extern void write_dbg_wcr2(unsigned);
549 extern void write_dbg_wcr3(unsigned);
550 extern void write_dbg_wcr4(unsigned);
551 extern void write_dbg_wcr5(unsigned);
552 extern void write_dbg_wcr6(unsigned);
553 extern void write_dbg_wcr7(unsigned);
554 extern void write_dbg_wcr8(unsigned);
555 extern void write_dbg_wcr9(unsigned);
556 extern void write_dbg_wcr10(unsigned);
557 extern void write_dbg_wcr11(unsigned);
558 extern void write_dbg_wcr12(unsigned);
559 extern void write_dbg_wcr13(unsigned);
560 extern void write_dbg_wcr14(unsigned);
561 extern void write_dbg_wcr15(unsigned);
562 extern void write_dbg_bxvr0(unsigned);
563 extern void write_dbg_bxvr1(unsigned);
564 extern void write_dbg_bxvr2(unsigned);
565 extern void write_dbg_bxvr3(unsigned);
566 extern void write_dbg_bxvr4(unsigned);
567 extern void write_dbg_bxvr5(unsigned);
568 extern void write_dbg_bxvr6(unsigned);
569 extern void write_dbg_bxvr7(unsigned);
570 extern void write_dbg_bxvr8(unsigned);
571 extern void write_dbg_bxvr9(unsigned);
572 extern void write_dbg_bxvr10(unsigned);
573 extern void write_dbg_bxvr11(unsigned);
574 extern void write_dbg_bxvr12(unsigned);
575 extern void write_dbg_bxvr13(unsigned);
576 extern void write_dbg_bxvr14(unsigned);
577 extern void write_dbg_bxvr15(unsigned);
578
579 extern void __enable_cache(void);
580 extern void __disable_cache(void);
581 extern void __disable_dcache(void);
582
583 extern void inner_dcache_flush_L1(void);
584 extern void inner_dcache_flush_all(void);
585
586 extern void invalidate_unified_TLB_inner_shareable(void);
587
588 extern void __inner_inv_dcache_L1(void);
589 extern void __inner_inv_dcache_L2(void);
590
591 extern void __inner_clean_dcache_L1(void);
592 extern void __inner_clean_dcache_L2(void);
593 extern void __inner_clean_dcache_all(void);
594
595 extern void trace_stop_dormant(void);
596 extern void trace_start_dormant(void);
597
598 //for save/restore breakpoint and watchpoint
599 extern void save_dbg_regs(unsigned int data[]);
600 extern void restore_dbg_regs(unsigned int data[]);
601
602 /*********************************
603 * glabal variable
604 **********************************/
605 volatile static int dormant_ret_flag[4] = {0,0,0,0};
606 volatile int power_state[4] = {STATUS_RUN,STATUS_RUN,STATUS_RUN,STATUS_RUN};
607 mcu_biu_reg mcu_biu;
608
609 /*
610 * Top level structure which encapsulates the context of the entire
611 * Kingfisher system
612 */
613 system_context switcher_context;
614
615 dbgreg_rw_ops dbgreg_rw_handlers[] = {
616 {
617 {read_dbg_bvr0, write_dbg_bvr0,},
618 {read_dbg_bcr0, write_dbg_bcr0,},
619 {read_dbg_wvr0, write_dbg_wvr0,},
620 {read_dbg_wcr0, write_dbg_wcr0,},
621 {read_dbg_bxvr0, write_dbg_bxvr0,},
622 },
623 {
624 {read_dbg_bvr1, write_dbg_bvr1,},
625 {read_dbg_bcr1, write_dbg_bcr1,},
626 {read_dbg_wvr1, write_dbg_wvr1,},
627 {read_dbg_wcr1, write_dbg_wcr1,},
628 {read_dbg_bxvr1, write_dbg_bxvr1,},
629 },
630 {
631 {read_dbg_bvr2, write_dbg_bvr2,},
632 {read_dbg_bcr2, write_dbg_bcr2,},
633 {read_dbg_wvr2, write_dbg_wvr2,},
634 {read_dbg_wcr2, write_dbg_wcr2,},
635 {read_dbg_bxvr2, write_dbg_bxvr2,},
636 },
637 {
638 {read_dbg_bvr3, write_dbg_bvr3,},
639 {read_dbg_bcr3, write_dbg_bcr3,},
640 {read_dbg_wvr3, write_dbg_wvr3,},
641 {read_dbg_wcr3, write_dbg_wcr3,},
642 {read_dbg_bxvr3, write_dbg_bxvr3,},
643 },
644 {
645 {read_dbg_bvr4, write_dbg_bvr4,},
646 {read_dbg_bcr4, write_dbg_bcr4,},
647 {read_dbg_wvr4, write_dbg_wvr4,},
648 {read_dbg_wcr4, write_dbg_wcr4,},
649 {read_dbg_bxvr4, write_dbg_bxvr4,},
650 },
651 {
652 {read_dbg_bvr5, write_dbg_bvr5,},
653 {read_dbg_bcr5, write_dbg_bcr5,},
654 {read_dbg_wvr5, write_dbg_wvr5,},
655 {read_dbg_wcr5, write_dbg_wcr5,},
656 {read_dbg_bxvr5, write_dbg_bxvr5,},
657 },
658 {
659 {read_dbg_bvr6, write_dbg_bvr6,},
660 {read_dbg_bcr6, write_dbg_bcr6,},
661 {read_dbg_wvr6, write_dbg_wvr6,},
662 {read_dbg_wcr6, write_dbg_wcr6,},
663 {read_dbg_bxvr6, write_dbg_bxvr6,},
664 },
665 {
666 {read_dbg_bvr7, write_dbg_bvr7,},
667 {read_dbg_bcr7, write_dbg_bcr7,},
668 {read_dbg_wvr7, write_dbg_wvr7,},
669 {read_dbg_wcr7, write_dbg_wcr7,},
670 {read_dbg_bxvr7, write_dbg_bxvr7,},
671 },
672 {
673 {read_dbg_bvr8, write_dbg_bvr8,},
674 {read_dbg_bcr8, write_dbg_bcr8,},
675 {read_dbg_wvr8, write_dbg_wvr8,},
676 {read_dbg_wcr8, write_dbg_wcr8,},
677 {read_dbg_bxvr8, write_dbg_bxvr8,},
678 },
679 {
680 {read_dbg_bvr9, write_dbg_bvr9,},
681 {read_dbg_bcr9, write_dbg_bcr9,},
682 {read_dbg_wvr9, write_dbg_wvr9,},
683 {read_dbg_wcr9, write_dbg_wcr9,},
684 {read_dbg_bxvr9, write_dbg_bxvr9,},
685 },
686 {
687 {read_dbg_bvr10, write_dbg_bvr10,},
688 {read_dbg_bcr10, write_dbg_bcr10,},
689 {read_dbg_wvr10, write_dbg_wvr10,},
690 {read_dbg_wcr10, write_dbg_wcr10,},
691 {read_dbg_bxvr10, write_dbg_bxvr10,},
692 },
693 {
694 {read_dbg_bvr11, write_dbg_bvr11,},
695 {read_dbg_bcr11, write_dbg_bcr11,},
696 {read_dbg_wvr11, write_dbg_wvr11,},
697 {read_dbg_wcr11, write_dbg_wcr11,},
698 {read_dbg_bxvr11, write_dbg_bxvr11,},
699 },
700 {
701 {read_dbg_bvr12, write_dbg_bvr12,},
702 {read_dbg_bcr12, write_dbg_bcr12,},
703 {read_dbg_wvr12, write_dbg_wvr12,},
704 {read_dbg_wcr12, write_dbg_wcr12,},
705 {read_dbg_bxvr12, write_dbg_bxvr12,},
706 },
707 {
708 {read_dbg_bvr13, write_dbg_bvr13,},
709 {read_dbg_bcr13, write_dbg_bcr13,},
710 {read_dbg_wvr13, write_dbg_wvr13,},
711 {read_dbg_wcr13, write_dbg_wcr13,},
712 {read_dbg_bxvr13, write_dbg_bxvr13,},
713 },
714 {
715 {read_dbg_bvr14, write_dbg_bvr14,},
716 {read_dbg_bcr14, write_dbg_bcr14,},
717 {read_dbg_wvr14, write_dbg_wvr14,},
718 {read_dbg_wcr14, write_dbg_wcr14,},
719 {read_dbg_bxvr14, write_dbg_bxvr14,},
720 },
721 {
722 {read_dbg_bvr15, write_dbg_bvr15,},
723 {read_dbg_bcr15, write_dbg_bcr15,},
724 {read_dbg_wvr15, write_dbg_wvr15,},
725 {read_dbg_wcr15, write_dbg_wcr15,},
726 {read_dbg_bxvr15, write_dbg_bxvr15,},
727 },
728 };
729
730
731 /*********************************
732 * function
733 **********************************/
734 /*
735 * Saves the MCU_BIU register context
736 * Requires 1 words of memory
737 */
738 static void save_mcu_biu_register(void)
739 {
740 mcu_biu.mcu_biu_control = reg_read(MCU_BIU_CONTROL);
741 }
742
743 static void restore_mcu_biu_register(void)
744 {
745 reg_write(MCU_BIU_CONTROL, mcu_biu.mcu_biu_control);
746 }
747
/*
 * Saves the GIC CPU interface context
 * Requires 4 words of memory (control, priority mask, binary point,
 * aliased binary point)
 */
752 static void save_gic_interface(u32 *pointer, unsigned gic_interface_address)
753 {
754 cpu_interface *ci = (cpu_interface *)gic_interface_address;
755
756 pointer[0] = ci->control;
757 pointer[1] = ci->priority_mask;
758 pointer[2] = ci->binary_point;
759 pointer[3] = ci->aliased_binary_point;
760
761 /* TODO: add nonsecure stuff */
762
763 }
764
/*
 * Saves this CPU's banked (SGI/PPI) parts of the distributor, then clears
 * the pending SGIs on this cpuif so they do not disturb the upcoming wfi.
 * Requires 19 words of memory.
 */
770 static void save_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address)
771 {
772 interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
773 unsigned int *ptr = 0x0;
774
775 /* Save SGI,PPI enable status*/
776 *pointer = id->enable.set[0];
777 ++pointer;
778 /* Save SGI,PPI priority status*/
779 pointer = copy_words(pointer, id->priority, 8);
780 /* Save SGI,PPI target status*/
781 pointer = copy_words(pointer, id->target, 8);
782 /* Save just the PPI configurations (SGIs are not configurable) */
783 *pointer = id->configuration[1];
784 ++pointer;
785 /* Save SGI,PPI security status*/
786 *pointer = id->security[0];
787 ++pointer;
788 #if 0
789 /*
790 * Private peripheral interrupts need to be replayed on
791 * the destination cpu interface for consistency. This
792 * is the responsibility of the peripheral driver. When
793 * it sees a pending interrupt while saving its context
794 * it should record enough information to recreate the
795 * interrupt while restoring.
796 * We don't save the Pending/Active status and clear it
797 * so that it does not interfere when we are back.
798 */
799 /* Clear PPI pending status*/
800 id->pending.clear[0] = 0xffffffff;
801 id->active.clear[0] = 0xffffffff;
802 #endif
803 #if 1
804 /* Save SGI,PPI pending status*/
805 *pointer = id->pending.set[0];
806 ++pointer;
807 #endif
808 /*
809 * IPIs are different and can be replayed just by saving
810 * and restoring the set/clear pending registers
811 */
812 ptr = pointer;
813 copy_words(pointer, id->sgi_set_pending, 4);
814 pointer += 8;
815
816 /*
817 * Clear the pending SGIs on this cpuif so that they don't
818 * interfere with the wfi later on.
819 */
820 copy_words(id->sgi_clr_pending, ptr, 4);
821
822
823
824 }
825
/*
 * Saves the shared parts of the distributor
 * Requires 1 word of memory, plus 20 words for each block of 32 SPIs (max 641 words)
 */
831 static void save_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address)
832 {
833 interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
834 unsigned num_spis, *saved_pending;
835
836 /* Calculate how many SPIs the GIC supports */
837 num_spis = 32 * (id->controller_type & 0x1f);
838
839 /* TODO: add nonsecure stuff */
840
841 /* Save rest of GIC configuration */
842 if (num_spis) {
843 pointer = copy_words(pointer, id->enable.set + 1, num_spis / 32);
844 pointer = copy_words(pointer, id->priority + 8, num_spis / 4);
845 pointer = copy_words(pointer, id->target + 8, num_spis / 4);
846 pointer = copy_words(pointer, id->configuration + 2, num_spis / 16);
847 pointer = copy_words(pointer, id->security + 1, num_spis / 32);
848 saved_pending = pointer;
849 pointer = copy_words(pointer, id->pending.set + 1, num_spis / 32);
850 }
851
852 /* Save control register */
853 *pointer = id->control;
854 }
855 static void restore_gic_interface(u32 *pointer, unsigned gic_interface_address)
856 {
857 cpu_interface *ci = (cpu_interface *)gic_interface_address;
858
859 /* TODO: add nonsecure stuff */
860
861 ci->priority_mask = pointer[1];
862 ci->binary_point = pointer[2];
863 ci->aliased_binary_point = pointer[3];
864
865 /* Restore control register last */
866 ci->control = pointer[0];
867 }
868 static void restore_gic_distributor_private(u32 *pointer, unsigned gic_distributor_address)
869 {
870 interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
871 unsigned tmp;
872 //unsigned ctr, prev_val = 0, prev_ctr = 0;
873
874 /* First disable the distributor so we can write to its config registers */
875 tmp = id->control;
876 id->control = 0;
877 /* Restore SGI,PPI enable status*/
878 id->enable.set[0] = *pointer;
879 ++pointer;
880 /* Restore SGI,PPI priority status*/
881 copy_words(id->priority, pointer, 8);
882 pointer += 8;
883 /* Restore SGI,PPI target status*/
884 copy_words(id->target, pointer, 8);
885 pointer += 8;
886 /* Restore just the PPI configurations (SGIs are not configurable) */
887 id->configuration[1] = *pointer;
888 ++pointer;
889 /* Restore SGI,PPI security status*/
890 id->security[0] = *pointer;
891 ++pointer;
892 #if 0
893 /*
894 * Clear active and pending PPIs as they will be recreated by the
895 * peripiherals
896 */
897 id->active.clear[0] = 0xffffffff;
898 id->pending.clear[0] = 0xffffffff;
899 #endif
900 #if 1
901 /* Restore SGI,PPI pending status*/
902 id->pending.set[0] = *pointer;
903 ++pointer;
904 #endif
905 /*
906 * Restore pending SGIs
907 */
908 copy_words(id->sgi_set_pending, pointer, 4);
909 pointer += 4;
910
911 id->control = tmp;
912 }
/*
 * Restore the shared (SPI) GIC distributor state saved by
 * save_gic_distributor_shared(), then re-inject pending bits for any
 * wake source (keypad, watchdogs, thermal, low battery) that the SPM
 * reports as having fired while the cluster was down, so the wakeup
 * interrupt is not lost across the power cycle.
 *
 * pointer                  - save area; consumed in the same order the
 *                            words were written by the save routine
 * gic_distributor_address  - base address of the GIC distributor
 *
 * Caller context: interrupts must be masked (I and F bits set in the
 * CPSR) since the distributor control register is restored last.
 */
static void restore_gic_distributor_shared(u32 *pointer, unsigned gic_distributor_address)
{
	interrupt_distributor *id = (interrupt_distributor *)gic_distributor_address;
	unsigned num_spis;
	int i, j;

	/* First disable the distributor so we can write to its config registers */
	id->control = 0;

	/* Calculate how many SPIs the GIC supports (ITLinesNumber field) */
	num_spis = 32 * ((id->controller_type) & 0x1f);

	/* TODO: add nonsecure stuff */

	/* Restore rest of GIC configuration */
	if (num_spis) {
		copy_words(id->enable.set + 1, pointer, num_spis / 32);
		pointer += num_spis / 32;
		copy_words(id->priority + 8, pointer, num_spis / 4);
		pointer += num_spis / 4;
		copy_words(id->target + 8, pointer, num_spis / 4);
		pointer += num_spis / 4;
		copy_words(id->configuration + 2, pointer, num_spis / 16);
		pointer += num_spis / 16;
		copy_words(id->security + 1, pointer, num_spis / 32);
		pointer += num_spis / 32;
		copy_words(id->pending.set + 1, pointer, num_spis / 32);

		/*
		 * Replay wake sources latched in the SPM raw status: for each
		 * source that fired, set the corresponding SPI pending bit so
		 * the handler runs once interrupts are unmasked again.
		 */
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_KP) {
			i = MT_KP_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = MT_KP_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}

		/*
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_MD_WDT) {
			i = MD_WDT_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = MD_WDT_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}
		*/
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_CONN_WDT) {
			i = CONN_WDT_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = CONN_WDT_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_TS) {
			i = TS_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = TS_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_LOW_BAT) {
			i = LOWBATTERY_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = LOWBATTERY_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}
		if (reg_read( IO_VIRT_TO_PHYS(SPM_SLEEP_ISR_RAW_STA) ) & WAKE_SRC_WDT) {
			i = MT_WDT_IRQ_ID / GIC_PRIVATE_SIGNALS;
			j = MT_WDT_IRQ_ID % GIC_PRIVATE_SIGNALS;
			id->pending.set[i] |= (1 << j);
		}

		/* Skip past the pending.set words consumed above */
		pointer += num_spis / 32;
	}

	/* We assume the I and F bits are set in the CPSR so that we will not respond to interrupts! */
	/* Restore control register */
	id->control = *pointer;
}
982
983 static void restore_bp_reg(debug_context_t *dbg, unsigned index, unsigned type)
984 {
985 switch (type) {
986 case DBGREG_WP_VAL:
987 dbgreg_rw_handlers[index].wvr.write(dbg->wvr[index]);
988 break;
989 case DBGREG_WP_CTRL:
990 dbgreg_rw_handlers[index].wcr.write(dbg->wcr[index]);
991 break;
992 case DBGREG_BP_XVAL:
993 dbgreg_rw_handlers[index].bxvr.write(dbg->bxvr[index]);
994 break;
995 case DBGREG_BP_VAL:
996 dbgreg_rw_handlers[index].bvr.write(dbg->bvr[index]);
997 break;
998 case DBGREG_BP_CTRL:
999 dbgreg_rw_handlers[index].bcr.write(dbg->bcr[index]);
1000 break;
1001 default:
1002 break;
1003 }
1004
1005 return;
1006 }
1007
1008 static void save_bp_reg(debug_context_t *dbg, unsigned index, unsigned type)
1009 {
1010 switch (type) {
1011 case DBGREG_WP_VAL:
1012 dbg->wvr[index] = dbgreg_rw_handlers[index].wvr.read();
1013 break;
1014 case DBGREG_WP_CTRL:
1015 dbg->wcr[index] = dbgreg_rw_handlers[index].wcr.read();
1016 break;
1017 case DBGREG_BP_XVAL:
1018 dbg->bxvr[index] = dbgreg_rw_handlers[index].bxvr.read();
1019 break;
1020 case DBGREG_BP_VAL:
1021 dbg->bvr[index] = dbgreg_rw_handlers[index].bvr.read();
1022 break;
1023 case DBGREG_BP_CTRL:
1024 dbg->bcr[index] = dbgreg_rw_handlers[index].bcr.read();
1025 break;
1026 default:
1027 break;
1028 }
1029
1030 return;
1031 }
1032
/*
 * Save (op != 0) or restore (op == 0) one class of breakpoint/watchpoint
 * registers, iterating over every implemented register of that class as
 * reported by DBGDIDR.
 *
 * dbg     - cp14 debug save area
 * bp_type - DBGREG_* selector passed through to save_bp_reg/restore_bp_reg
 * op      - nonzero to save, zero to restore
 *
 * NOTE(review): the DBGDIDR BRPs/WRPs fields encode (count - 1), which is
 * presumably why the loop bound below is inclusive (<=) — confirm against
 * the ARM ARM before changing it.
 */
static void sr_bp_context(debug_context_t *dbg, unsigned bp_type, unsigned op)
{
	unsigned num_bps, num_ctx_cmps, num_wps, didr;
	unsigned index = 0, max_index = 0;

	didr = read_dbg_didr();
	num_bps = (didr >> 24) & 0xf;
	num_ctx_cmps = (didr >> 20) & 0xf;
	num_wps = (didr >> 28) & 0xf;

	switch (bp_type) {
	case DBGREG_WP_VAL:
	case DBGREG_WP_CTRL:
		max_index = num_wps;
		break;
	case DBGREG_BP_XVAL:
		/*
		 * Extended-value registers exist only for the context-matching
		 * comparators, which occupy the top of the breakpoint range;
		 * start the iteration at the first of them.
		 */
		index = num_bps - num_ctx_cmps;
		/* fallthrough */
	case DBGREG_BP_VAL:
	case DBGREG_BP_CTRL:
		max_index = num_bps;
		break;
	default:
		break;
	}

	for (; index <= max_index; index++)
		if (op)
			save_bp_reg(dbg, index, bp_type);
		else
			restore_bp_reg(dbg, index, bp_type);
	return;
}
1065
/*
 * Save the v7.1 debug context through the cp14 interface.
 *
 * Sequence matters: the OS Double Lock is released first (if set), the
 * OS Lock is then taken so the debug registers cannot change while they
 * are being read, and finally the Double Lock is set so debug state is
 * preserved across the coming power-down.
 *
 * context - save area, treated as a debug_context_t
 */
static void save_v71_debug_cp14(unsigned *context)
{
	debug_context_t *dbg = (void *) context;
	unsigned virtext_present;

	/*
	 * Unlock the Double lock.
	 */
	if (read_dbg_osdlr() == 0x1)
		write_dbg_osdlr(OSDLR_UNLOCKED);

	/* Virtualization extensions imply extended-value (BXVR) registers */
	virtext_present = (read_dbg_devid() >> 16) & 0xf;

	/*
	 * Prevent updates to the debug registers during a S&R operation
	 */
	write_dbg_oslar(OSLAR_LOCKED);

	dbg->dtrrx_e = read_dbg_dtrrxext();
	dbg->dtrtx_e = read_dbg_dtrtxext();
	dbg->dscr_e = read_dbg_dscrext();
	dbg->wfar = read_dbg_wfar();
	dbg->vcr = read_dbg_vcr();
	dbg->claimclr = read_dbg_claimclr();

	if (virtext_present)
		sr_bp_context(dbg, DBGREG_BP_XVAL, 1);

	sr_bp_context(dbg, DBGREG_BP_VAL, 1);
	sr_bp_context(dbg, DBGREG_BP_CTRL, 1);
	sr_bp_context(dbg, DBGREG_WP_VAL, 1);
	sr_bp_context(dbg, DBGREG_WP_CTRL, 1);

	/* Set the Double Lock so debug state survives the power-down */
	write_dbg_osdlr(OSDLR_LOCKED);

	return;
}
1103
/*
 * Restore the v7.1 debug context through the cp14 interface; mirror of
 * save_v71_debug_cp14().
 *
 * Sequence matters: release the OS Double Lock (it may still be set
 * from the save path if the core did not actually power-cycle), take
 * the OS Lock while the registers are written back, then release the
 * OS Lock so the debugger works again.
 *
 * context - save area previously filled by save_v71_debug_cp14()
 */
static void restore_v71_debug_cp14(unsigned *context)
{
	debug_context_t *dbg = (void *) context;
	unsigned virtext_present;

	/*
	 * Unlock the Double lock.
	 */
	if (read_dbg_osdlr() == 0x1)
		write_dbg_osdlr(OSDLR_UNLOCKED);

	/* Virtualization extensions imply extended-value (BXVR) registers */
	virtext_present = (read_dbg_devid() >> 16) & 0xf;

	/*
	 * Prevent updates to the debug registers during a S&R operation
	 */
	write_dbg_oslar(OSLAR_LOCKED);

	write_dbg_dtrrxext(dbg->dtrrx_e);
	write_dbg_dtrtxext(dbg->dtrtx_e);
	write_dbg_dscrext(dbg->dscr_e);
	write_dbg_wfar(dbg->wfar);
	write_dbg_vcr(dbg->vcr);
	/* Saved via claimclr, restored via claimset (set/clear register pair) */
	write_dbg_claimset(dbg->claimclr);

	if (virtext_present)
		sr_bp_context(dbg, DBGREG_BP_XVAL, 0);

	sr_bp_context(dbg, DBGREG_BP_VAL, 0);
	sr_bp_context(dbg, DBGREG_BP_CTRL, 0);
	sr_bp_context(dbg, DBGREG_WP_VAL, 0);
	sr_bp_context(dbg, DBGREG_WP_CTRL, 0);
	isb();

	/*
	 * Unlock access to the debug registers
	 */
	write_dbg_oslar(OSLAR_UNLOCKED);

	return;
}
1145
1146 debug_registers_t *read_debug_address(void)
1147 {
1148 unsigned drar, dsar;
1149
1150 drar = read_dbg_drar();
1151 dsar = read_dbg_dsar();
1152
1153 if (!(drar & DRAR_VALID_MASK)
1154 || !(dsar & DSAR_VALID_MASK)) {
1155 return 0; /* No memory-mapped debug on this processor */
1156 }
1157
1158 return (debug_registers_t *) ((drar & DRAR_ADDRESS_MASK)
1159 + (dsar & DSAR_ADDRESS_MASK));
1160 }
1161
1162 /*
1163 * We assume that before save (and after restore):
1164 * - OSLAR is NOT locked, or the debugger would not work properly
1165 * - LAR is locked, because the ARM ARM says it must be
1166 * - OSDLR is NOT locked, or the debugger would not work properly
1167 */
1168
/*
 * Save the debug context through the memory-mapped interface.
 *
 * Handles both v7.0 and v7.1 debug: the version is read from DIDR and
 * only v7.1 gets the extra EACR save and the final OS Double Lock (the
 * lock makes debug inaccessible, so it must be the last step).
 * Breakpoint extended-value registers are saved only when the
 * virtualization extensions are present (ID_PFR1).
 *
 * context - save area, treated as a debug_context_t
 */
static void save_v7_debug_mmapped(unsigned *context)
{
	debug_registers_t *dbg = (void *)read_debug_address();
	debug_context_t *ctx = (void *)context;
	unsigned v71, num_bps, num_wps, i;
	unsigned didr, virtext_present = (read_id_pfr1() >> 12) & 0xf;

	/* Nothing to save when there is no memory-mapped debug */
	if (!dbg) {
		return;
	}

	didr = dbg->didr;
	/*
	 * Work out what version of debug we have
	 */
	v71 =
	    (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
	     DIDR_VERSION_7_1);

	/*
	 * Save all context to memory
	 */
	ctx->vcr = dbg->vcr;
	ctx->dscr_e = dbg->dscr_e;
	ctx->claim = dbg->claimclr;

	if (v71) {
		ctx->eacr = dbg->eacr;
	}

	/* DIDR breakpoint/watchpoint fields encode (count - 1) */
	num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
	for (i = 0; i < num_bps; ++i) {
		ctx->bvr[i] = dbg->bvr[i];
		ctx->bcr[i] = dbg->bcr[i];
		if (virtext_present)
			ctx->bxvr[i] = dbg->bxvr[i];
	}

	num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
	for (i = 0; i < num_wps; ++i) {
		ctx->wvr[i] = dbg->wvr[i];
		ctx->wcr[i] = dbg->wcr[i];
	}

	/*
	 * If Debug V7.1, we must set osdlr (by cp14 interface) before power down.
	 * Once we have done this, debug becomes inaccessible.
	 */
	if (v71) {
		write_dbg_osdlr(OSDLR_LOCKED);
	}
}
1221
1222 static void restore_v7_debug_mmapped(unsigned *context)
1223 {
1224 debug_registers_t *dbg = (void *)read_debug_address();
1225 debug_context_t *ctx = (void *)context;
1226 unsigned v71, num_bps, num_wps, i;
1227 unsigned didr, virtext_present = (read_id_pfr1() >> 12) & 0xf;;
1228
1229 if (!dbg) {
1230 return;
1231 }
1232
1233 didr = dbg->didr;
1234 /*
1235 * Work out what version of debug we have
1236 */
1237 v71 =
1238 (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
1239 DIDR_VERSION_7_1);
1240
1241 /* Enable write access to registers */
1242 dbg->lar = LAR_UNLOCKED;
1243 /*
1244 * If Debug V7.1, we must unset osdlr (by cp14 interface) before restoring.
1245 * (If the CPU has not actually power-cycled, osdlr may not be reset).
1246 */
1247 if (v71) {
1248 write_dbg_osdlr(OSDLR_UNLOCKED);
1249 }
1250
1251 /*
1252 * Restore all context from memory
1253 */
1254 dbg->vcr = ctx->vcr;
1255 dbg->claimclr = CLAIMCLR_CLEAR_ALL;
1256 dbg->claimset = ctx->claim;
1257
1258 if (v71) {
1259 dbg->eacr = ctx->eacr;
1260 }
1261
1262 num_bps = 1 + ((didr >> DIDR_BP_SHIFT) & DIDR_BP_MASK);
1263 for (i = 0; i < num_bps; ++i) {
1264 dbg->bvr[i] = ctx->bvr[i];
1265 dbg->bcr[i] = ctx->bcr[i];
1266 if (virtext_present)
1267 dbg->bxvr[i] = ctx->bxvr[i];
1268 }
1269
1270 num_wps = 1 + ((didr >> DIDR_WP_SHIFT) & DIDR_WP_MASK);
1271 for (i = 0; i < num_wps; ++i) {
1272 dbg->wvr[i] = ctx->wvr[i];
1273 dbg->wcr[i] = ctx->wcr[i];
1274 }
1275
1276 /* Clear PRSR.SPD by reading PRSR */
1277 if (!v71) {
1278 (dbg->prsr);
1279 }
1280
1281 /* Re-enable debug */
1282 dbg->dscr_e = ctx->dscr_e;
1283
1284 /* Disable write access to registers */
1285 dbg->lar = LAR_LOCKED;
1286 }
1287
1288 void save_v7_debug(unsigned *context)
1289 {
1290 unsigned v71 = 0, didr = read_dbg_didr();
1291
1292 v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
1293 DIDR_VERSION_7_1);
1294
1295 /*
1296 * TODO: Code for saving the v7.0 Debug context through the
1297 * cp14 interface has not been implemented as yet.
1298 */
1299 if (v71)
1300 save_v71_debug_cp14(context);
1301 else
1302 save_v7_debug_mmapped(context);
1303
1304 return;
1305 }
1306
1307 void restore_v7_debug(unsigned *context)
1308 {
1309 unsigned v71 = 0, didr = read_dbg_didr();
1310
1311 v71 = (((didr >> DIDR_VERSION_SHIFT) & DIDR_VERSION_MASK) ==
1312 DIDR_VERSION_7_1);
1313
1314 /*
1315 * TODO: Code for restoring the v7.0 Debug context through the
1316 * cp14 interface has not been implemented as yet.
1317 */
1318 if (v71)
1319 restore_v71_debug_cp14(context);
1320 else
1321 restore_v7_debug_mmapped(context);
1322
1323 return;
1324 }
1325
1326 void stop_generic_timer(generic_timer_context * ctr_ctx)
1327 {
1328 /*
1329 * Disable the timer and mask the irq to prevent
1330 * suprious interrupts on this cpu interface. It
1331 * will bite us when we come back if we don't. It
1332 * will be replayed on the inbound cluster.
1333 */
1334 write_cntp_ctl(TIMER_MASK_IRQ);
1335
1336 #if 0 // wait GIC APIs by Sten
1337 /*
1338 * If the local timer interrupt was being used as
1339 * the asynchronous trigger, then it was disabled
1340 * in handle_interrupt() to prevent this level-
1341 * triggerred interrupt from firing. Now that its
1342 * been acked at the peripheral. We can renable it
1343 */
1344 if (!hyp_timer_trigger) {
1345 if (ctr_ctx->cntp_ctl & TIMER_IRQ_STAT)
1346 gic_enable_int(LCL_TIMER_IRQ);
1347 }
1348 #endif
1349
1350 return;
1351 }
1352
1353 void save_pmu_context(unsigned cluster_id, unsigned cpu_id)
1354 {
1355 switch (pmu_mode) {
1356 case PMU_STATE1:
1357 save_performance_monitors(migration_ctx[cpu_id]);
1358 break;
1359 case PMU_STATE2:
1360 save_performance_monitors(clusters_ctx[cluster_id][cpu_id]);
1361 break;
1362 case PMU_STATE0:
1363 default:
1364 break;
1365 };
1366 }
1367
1368 void restore_pmu_context(unsigned cluster_id, unsigned cpu_id)
1369 {
1370 switch (pmu_mode) {
1371 case PMU_STATE1:
1372 restore_performance_monitors(migration_ctx[cpu_id]);
1373 break;
1374 case PMU_STATE2:
1375 restore_performance_monitors(clusters_ctx[cluster_id][cpu_id]);
1376 break;
1377 case PMU_STATE0:
1378 default:
1379 break;
1380 };
1381 }
1382
/*
 * Save the full non-secure CPU context before dormant/shutdown:
 * generic timer (then stopped), PMU, cp15, MMU, fault status, VFP,
 * GIC (cpu0 only), debug registers and the MCU BIU state.
 *
 * Order is load-bearing: the banked general-purpose registers are
 * saved LAST, after dormant_ret_flag is cleared, because resume
 * re-enters mid-function via restore_banked_registers() — everything
 * that must be valid on that path has to be saved before that point.
 */
static void platform_save_context(void)
{
	unsigned cpu_id = read_cpuid(), cluster_id = read_clusterid();
	cpu_context *ns_cpu_ctx = &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
	unsigned *gp_context = ns_cpu_ctx->banked_cpu_regs;
	unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
	//debug_context_t *debug_context = &ns_cpu_ctx->debug_ctx;
	unsigned int *dbg_ctx = ns_cpu_ctx->dbg_data;
	banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
	generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
	cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
	gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;
	global_context *gbl_context = &switcher_context.cluster.ns_cluster_ctx;

	//printk("[platform_save_context] cpu_id=%d", cpu_id);

	/*
	 * Save the 32-bit Generic timer context & stop them
	 */
	save_generic_timer((unsigned *)cp15_timer_ctx, 0x0);
	stop_generic_timer(cp15_timer_ctx);

	/*
	 * Save v7 generic performance monitors
	 * Save cpu general purpose banked registers
	 * Save cp15 context
	 */
	save_pmu_context(cluster_id, cpu_id);
	//save_banked_registers(gp_context); // move to tail
	save_cp15(cp15_context->cp15_misc_regs);
	save_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
	save_mmu(cp15_context->cp15_mmu_regs);
	save_fault_status((unsigned *)fault_ctx);

	save_vfp(vfp_context);

#if 1 // GIC APIs by Sten
	/* Only cpu0 saves the GIC: cpu interface, private and shared distributor */
	if(cpu_id==0)
	{
		/* Save cpu interface (cpu view) context */
		save_gic_interface(gic_pvt_context->gic_cpu_if_regs,GIC_CPU_BASE);
		/*
		 * TODO:
		 * Is it safe for the secondary cpu to save its context
		 * while the GIC distributor is on. Should be as its
		 * banked context and the cpu itself is the only one
		 * who can change it. Still have to consider cases e.g
		 * SGIs/Localtimers becoming pending.
		 */
		/* Save distributor interface private context */
		save_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,GIC_DIST_BASE);
		/* Save distributor interface global context */
		save_gic_distributor_shared(gbl_context->gic_dist_if_regs,GIC_DIST_BASE);
	}
#endif

	//save_v7_debug((unsigned *)debug_context);
	//for save/restore breakpoint and watchpoint
	save_dbg_regs(dbg_ctx);

	save_mcu_biu_register();

	if (power_state[cpu_id] == STATUS_DORMANT) {
		/* disable L2 invalidate when reset */
		reg_write(CA7_CACHE_CONFIG, reg_read(CA7_CACHE_CONFIG) | (1U << 4));
	}

	/* 0 = power-down path; set to 1/2 by platform_restore_context on resume */
	dormant_ret_flag[cpu_id] = 0;

	save_banked_registers(gp_context);
}
1454
/*
 * Restore the CPU context after wakeup. Runs initially with the MMU
 * off, so the first phase works through PHYSICAL addresses (__pa /
 * IO_VIRT_TO_PHYS); once mt_restore_control_registers() turns the MMU
 * back on, the second phase switches to the virtual-address aliases
 * (the *_v variables). Statement order in between is critical.
 *
 * Ends by restoring the banked GP registers, which returns execution
 * to the save point inside platform_save_context()'s caller.
 */
static void platform_restore_context(void)
{
	unsigned cpu_id = read_cpuid();
	unsigned cluster_id = read_clusterid();

	/* Physical-address views: usable before the MMU is restored */
	cpu_context *ns_cpu_ctx = (cpu_context *) __pa(&switcher_context.cluster.core[cpu_id].ns_cpu_ctx);
	global_context *gbl_context = (global_context *) __pa(&switcher_context.cluster.ns_cluster_ctx);
	unsigned *vfp_context = ns_cpu_ctx->vfp_regs;
	//debug_context_t *debug_context = &ns_cpu_ctx->debug_ctx;
	//generic_timer_context *cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;
	banked_cp15_context *cp15_context = &ns_cpu_ctx->banked_cp15_regs;
	cp15_fault_regs *fault_ctx = &cp15_context->ns_cp15_fault_regs;
	gic_cpu_context *gic_pvt_context = &ns_cpu_ctx->gic_cpu_ctx;

	/* Virtual-address views: only valid after the MMU is back on */
	cpu_context *ns_cpu_ctx_v = &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
	unsigned *gp_context_v = ns_cpu_ctx_v->banked_cpu_regs;
	banked_cp15_context *cp15_context_v = &ns_cpu_ctx_v->banked_cp15_regs;
	generic_timer_context *cp15_timer_ctx_v = &ns_cpu_ctx_v->cp15_timer_ctx;
	unsigned int *dbg_ctx = ns_cpu_ctx_v->dbg_data;

#if 1 // GIC APIs by Sten
	/* Only cpu0 restores the GIC: shared, then private, then cpu interface */
	if(cpu_id==0)
	{
		/*restores the global context */
		restore_gic_distributor_shared(gbl_context->gic_dist_if_regs,IO_VIRT_TO_PHYS(GIC_DIST_BASE));
		/*restores the private context */
		restore_gic_distributor_private(gic_pvt_context->gic_dist_if_pvt_regs,IO_VIRT_TO_PHYS(GIC_DIST_BASE));
		/* Restore GIC context */
		restore_gic_interface(gic_pvt_context->gic_cpu_if_regs,IO_VIRT_TO_PHYS(GIC_CPU_BASE));
	}
#endif

	isb();
	dsb();
	restore_vfp(vfp_context);
	/*
	 * Restore cp15 context
	 * Restore cpu general purpose banked registers
	 * Restore v7 generic performance monitors
	 * Restore the 32-bit Generic timer context
	 */
	restore_fault_status((unsigned *)fault_ctx);

	restore_mmu(cp15_context->cp15_mmu_regs);

	/* Drop stale translations before turning the MMU on */
	invalidate_unified_TLB_inner_shareable();

	mt_restore_control_registers(cp15_context->cp15_ctrl_regs, 0x0);
	isb();
	dsb();

	//now MMU is restored, welcome to virtual world-----------------------

	restore_cp15(cp15_context_v->cp15_misc_regs);

	restore_pmu_context(cluster_id, cpu_id);

	restore_generic_timer((unsigned *)cp15_timer_ctx_v, 0x0);

	//restore_v7_debug((unsigned *)debug_context);
	restore_dbg_regs(dbg_ctx);

	restore_mcu_biu_register();

	if (power_state[cpu_id] == STATUS_DORMANT) {
		/* enable L2 invalidate when reset */
		reg_write(CA7_CACHE_CONFIG, reg_read(CA7_CACHE_CONFIG) & ~(1U << 4));
	}
#if 0 /* FIXME early porting */
	trace_stop_dormant();
	trace_start_dormant();
#endif
	/* Tell cpu_power_down() which path we resumed from: 1=dormant, 2=shutdown */
	if (power_state[cpu_id] == STATUS_DORMANT) {
		dormant_ret_flag[cpu_id] = 1;
	} else if (power_state[cpu_id] == STATUS_SHUTDOWN) {
		dormant_ret_flag[cpu_id] = 2;
	}

	power_state[cpu_id] = STATUS_RUN;

	restore_banked_registers(gp_context_v);
}
1537
/*
 * Enter dormant or shutdown mode on the calling CPU.
 *
 * mode - STATUS_DORMANT or STATUS_SHUTDOWN (stored in power_state[])
 *
 * Programs cpu_wake_up as the warm-boot entry (directly, or via the
 * TEE on Trustonic builds), flushes the caches appropriately for the
 * chosen mode, then saves all context. On the power-down path the CPU
 * never returns from platform_save_context(); on wakeup, resume
 * re-enters just after it via the restored banked registers.
 *
 * Returns dormant_ret_flag[cpu]: 0 = power-down aborted / never left,
 * 1 = resumed from dormant, 2 = resumed from shutdown.
 */
int cpu_power_down(int mode)
{
	unsigned cpu_id;

	void (*restore_ptr)(void);
	restore_ptr = cpu_wake_up;

	cpu_id = read_cpuid();

#if !defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
	/* Warm-boot vector: Boot ROM jumps here on wakeup */
	reg_write(BOOTROM_BOOT_ADDR, __pa(restore_ptr));
#else //#if !defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
	mt_secure_call(MC_FC_SLEEP, __pa(restore_ptr), cpu_id, 0);
#endif //#if !defined(CONFIG_TRUSTONIC_TEE_SUPPORT)

#if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
	/* Also publish the entry point to the non-secure slave boot slot */
	*((unsigned int*)NS_SLAVE_BOOT_ADDR) = __pa(restore_ptr);
#endif

	power_state[cpu_id] = mode;

#if 1
	__disable_dcache();

	dsb();
	/* NOTE(review): MCR p15,0,r0,c8,c7,1 is a TLB-invalidate-by-MVA with
	 * whatever happens to be in r0 — looks intentional as-inherited, but
	 * confirm the intended TLB maintenance operation. */
	__asm__ __volatile__("MCR p15,0,r0,c8,c7,1");
	dsb();

	if (power_state[cpu_id] == STATUS_DORMANT)
	{
		/* Flush all data from the L1 data cache */
		inner_dcache_flush_L1();

		/* Clean all data from the L2 data cache */
		__inner_clean_dcache_L2();
	}
	else
	{
		/* Clean and invalidate all data from the L1 +L2 data cache */
		inner_dcache_flush_all();
	}

	/* Execute a CLREX instruction to clear any exclusive monitor state */
	__asm__ __volatile__("clrex");
#endif

	platform_save_context();

#if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
	// After we returned from dormant, platform_restore_context
	// will bring us back to here.
	// Check dormant_ret_flag and only enter TrustZone dormant
	// if we are not on restore path
	if (!dormant_ret_flag[cpu_id] && !kree_pm_cpu_dormant(mode))
	{
		if (power_state[cpu_id] == STATUS_DORMANT) {
			dormant_ret_flag[cpu_id] = 1;
		} else if (power_state[cpu_id] == STATUS_SHUTDOWN) {
			dormant_ret_flag[cpu_id] = 2;
		}
	}
#endif

	return dormant_ret_flag[cpu_id];
}
1603
/*
 * Wakeup entry point on the resume path: restores the full CPU context
 * saved by platform_save_context(). Does not return normally — control
 * resumes at the saved banked-register state.
 */
void cpu_start_restore(void)
{
	platform_restore_context();
}
1608
1609 void cpu_dormant_init(void)
1610 {
1611 //set Boot ROM power-down control to power down
1612 reg_write(BOOTROM_PWR_CTRL, reg_read(BOOTROM_PWR_CTRL) | 0x80000000);
1613 }
1614
/*
 * Called after a dormant/shutdown attempt to handle the two outcomes:
 *
 *  - Aborted power-down (power_state still != STATUS_RUN): the CPU
 *    never lost context, so re-enable the cache, restore only the
 *    generic timer and PMU, undo the L2-invalidate-on-reset tweak,
 *    and mark the CPU running again.
 *
 *  - Completed shutdown resume on cpu0 (dormant_ret_flag == 2): apply
 *    the ARM Cortex-A7 errata 802022 workaround — point the boot
 *    vector at the workaround stub, then power every secondary core
 *    up and straight back down so each executes it once, before
 *    restoring the normal boot address.
 */
void cpu_check_dormant_abort(void)
{
	unsigned cpu_id;
	cpu_id = read_cpuid();

	if (power_state[cpu_id] != STATUS_RUN)
	{
		/* Power-down was aborted: context was never lost */
		//unsigned cpu_id;
		unsigned cluster_id;
		cpu_context *ns_cpu_ctx;
		generic_timer_context *cp15_timer_ctx;

		__enable_cache();

		//cpu_id = read_cpuid();
		cluster_id = read_clusterid();
		ns_cpu_ctx = &switcher_context.cluster.core[cpu_id].ns_cpu_ctx;
		cp15_timer_ctx = &ns_cpu_ctx->cp15_timer_ctx;

		// restore timer and performance monitor
		restore_generic_timer((unsigned *)cp15_timer_ctx, 0x0);
		restore_pmu_context(cluster_id, cpu_id);

		if (power_state[cpu_id] == STATUS_DORMANT) {
			/* enable L2 invalidate when reset */
			reg_write(CA7_CACHE_CONFIG, reg_read(CA7_CACHE_CONFIG) & ~(1U << 4));
		}

		power_state[cpu_id] = STATUS_RUN;
	}
	else
	{
		/* dormant_ret_flag == 2 means we resumed from full shutdown */
		if ((cpu_id == 0) && (dormant_ret_flag[cpu_id] == 2))
		{
			/* workaround for ARM CA7 Errata 802022 */
			extern void cpu_wake_up_errata_802022(void);
			reg_write(BOOTROM_BOOT_ADDR, virt_to_phys(cpu_wake_up_errata_802022));
#if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
			kree_pm_cpu_dormant_workaround_wake(1);
#endif

			/* Bounce each secondary core through the workaround stub */
			if (num_possible_cpus() == 4)
			{
				spm_mtcmos_ctrl_cpu1(STA_POWER_ON, 1);
				spm_mtcmos_ctrl_cpu2(STA_POWER_ON, 1);
				spm_mtcmos_ctrl_cpu3(STA_POWER_ON, 1);

				spm_mtcmos_ctrl_cpu3(STA_POWER_DOWN, 1);
				spm_mtcmos_ctrl_cpu2(STA_POWER_DOWN, 1);
				spm_mtcmos_ctrl_cpu1(STA_POWER_DOWN, 1);
			}
			else if (num_possible_cpus() == 3)
			{
				spm_mtcmos_ctrl_cpu1(STA_POWER_ON, 1);
				spm_mtcmos_ctrl_cpu2(STA_POWER_ON, 1);

				spm_mtcmos_ctrl_cpu2(STA_POWER_DOWN, 1);
				spm_mtcmos_ctrl_cpu1(STA_POWER_DOWN, 1);
			}
			else if (num_possible_cpus() == 2)
			{
				spm_mtcmos_ctrl_cpu1(STA_POWER_ON, 1);

				spm_mtcmos_ctrl_cpu1(STA_POWER_DOWN, 1);
			}

			/* set to normal boot address after workaround finish */
#if defined(CONFIG_MTK_IN_HOUSE_TEE_SUPPORT)
			kree_pm_cpu_dormant_workaround_wake(0);
#endif
		}

		__enable_cache();
	}
}
1690
/* Module metadata */
MODULE_AUTHOR("Wan-Ching Huang <marc.huang@mediatek.com>");
MODULE_DESCRIPTION("MT658x Dormant/Shutdown Mode Driver $Revision: #1 $");
1693