/*
 * This control block defines the PACA which defines the processor
 * specific data for each logical processor on the system.
 * There are some pointers defined that are utilized by PLIC.
 *
 * C 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_PACA_H
#define _ASM_POWERPC_PACA_H
#ifdef __KERNEL__

#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/exception-64e.h>

register struct paca_struct *local_paca asm("r13");

#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
extern unsigned int debug_smp_processor_id(void);      /* from linux/smp.h */
/*
 * Add standard checks that preemption cannot occur when using get_paca():
 * otherwise the paca_struct it points to may be the wrong one just after.
 */
#define get_paca()      ((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()      local_paca
#endif

#define get_lppaca()    (get_paca()->lppaca_ptr)
#define get_slb_shadow()        (get_paca()->slb_shadow_ptr)
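
/*
 * Usage sketch (illustrative only, not part of the interface): the pointer
 * read from r13 is only guaranteed to be this CPU's paca while the task
 * cannot migrate, so preemptible callers typically bracket the access:
 *
 *      preempt_disable();
 *      u16 idx = get_paca()->paca_index;
 *      preempt_enable();
 */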

struct task_struct;

/*
 * Defines the layout of the paca.
 *
 * This structure is not directly accessed by firmware or the service
 * processor.
 */
struct paca_struct {
#ifdef CONFIG_PPC_BOOK3S
        /*
         * Because hw_cpu_id, unlike other paca fields, is accessed
         * routinely from other CPUs (from the IRQ code), we stick to
         * read-only (after boot) fields in the first cacheline to
         * avoid cacheline bouncing.
         */

        struct lppaca *lppaca_ptr;      /* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_BOOK3S */
        /*
         * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
         * load lock_token and paca_index with a single lwz
         * instruction. They must travel together and be properly
         * aligned.
         */
        u16 lock_token;                 /* Constant 0x8000, used in locks */
        u16 paca_index;                 /* Logical processor number */
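
        /*
         * Rough consumer-side sketch (the real users are the spinlock
         * helpers, e.g. arch/powerpc/lib/locks.c): reading the pair as one
         * 32-bit big-endian value gives 0x8000 in the upper halfword and
         * the logical CPU number in the lower one, so a held lock both
         * reads as "busy" and names its holder:
         *
         *      u32 token = *(u32 *)&get_paca()->lock_token;
         *      unsigned int holder_cpu = token & 0xffff;
         */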

        u64 kernel_toc;                 /* Kernel TOC address */
        u64 kernelbase;                 /* Base address of kernel */
        u64 kernel_msr;                 /* MSR while running in kernel */
#ifdef CONFIG_PPC_STD_MMU_64
        u64 stab_real;                  /* Absolute address of segment table */
        u64 stab_addr;                  /* Virtual address of segment table */
#endif /* CONFIG_PPC_STD_MMU_64 */
        void *emergency_sp;             /* pointer to emergency stack */
        u64 data_offset;                /* per cpu data offset */
        s16 hw_cpu_id;                  /* Physical processor number */
        u8 cpu_start;                   /* At startup, processor spins until */
                                        /* this becomes non-zero. */
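        /*
         * Rough C equivalent of that startup handshake (the actual spin
         * loop is in assembly; this is a sketch only):
         *
         *      while (!local_paca->cpu_start)  // secondary waits here...
         *              barrier();
         *      // ...until the boot CPU sets paca[nr].cpu_start = 1
         *      // (see smp_generic_kick_cpu()).
         */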
#ifdef CONFIG_PPC_STD_MMU_64
        struct slb_shadow *slb_shadow_ptr;

        /*
         * Now, starting in cacheline 2, the exception save areas
         */
        /* used for most interrupts/exceptions */
        u64 exgen[10] __attribute__((aligned(0x80)));
        u64 exmc[10];                   /* used for machine checks */
        u64 exslb[10];                  /* used for SLB/segment table misses
                                         * on the linear mapping */
        /* SLB related definitions */
        u16 vmalloc_sllp;
        u16 slb_cache_ptr;
        u16 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC_BOOK3E
        pgd_t *pgd;                     /* Current PGD */
        pgd_t *kernel_pgd;              /* Kernel PGD */
        u64 exgen[8] __attribute__((aligned(0x80)));
        u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80)));
        u64 exmc[8];                    /* used for machine checks */
        u64 excrit[8];                  /* used for crit interrupts */
        u64 exdbg[8];                   /* used for debug interrupts */

        /* Kernel stack pointers for use by special exceptions */
        void *mc_kstack;
        void *crit_kstack;
        void *dbg_kstack;
#endif /* CONFIG_PPC_BOOK3E */

        mm_context_t context;

        /*
         * then miscellaneous read-write fields
         */
        struct task_struct *__current;  /* Pointer to current */
        u64 kstack;                     /* Saved Kernel stack addr */
        u64 stab_rr;                    /* stab/slb round-robin counter */
        u64 saved_r1;                   /* r1 save for RTAS calls */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
        u16 trap_save;                  /* Used when bad stack is encountered */
        u8 soft_enabled;                /* irq soft-enable flag */
        u8 hard_enabled;                /* set if irqs are enabled in MSR */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
        u8 perf_event_pending;          /* PM interrupt while soft-disabled */
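
        /*
         * Sketch of the lazy-disable scheme these flags support (the real
         * code lives in hw_irq.h and the irq handling path; idea only,
         * not the implementation):
         *
         *      local_irq_disable();    // clears soft_enabled only; MSR[EE]
         *                              // may stay on until an interrupt
         *                              // actually arrives and is deferred
         *      local_irq_enable();     // sets soft_enabled and replays any
         *                              // interrupt taken while soft-disabled
         */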

        /* Stuff for accurate time accounting */
        u64 user_time;                  /* accumulated usermode TB ticks */
        u64 system_time;                /* accumulated system TB ticks */
        u64 startpurr;                  /* PURR/TB value snapshot */
        u64 startspurr;                 /* SPURR value snapshot */
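
        /*
         * Rough idea of how the snapshots are consumed by the vtime
         * accounting code in arch/powerpc/kernel/time.c (illustrative
         * only; the real path also scales by SPURR and splits user vs
         * system time):
         *
         *      u64 now = mfspr(SPRN_PURR);
         *      get_paca()->system_time += now - get_paca()->startpurr;
         *      get_paca()->startpurr = now;
         */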

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        struct {
                u64 esid;
                u64 vsid;
        } kvm_slb[64];                  /* guest SLB */
        u8 kvm_slb_max;                 /* highest used guest slb entry */
        u8 kvm_in_guest;                /* are we inside the guest? */
#endif
};

extern struct paca_struct paca[];
extern void initialise_pacas(void);
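
/*
 * Typical use of the array (illustrative): early boot fills in every
 * entry and then points r13 at the current CPU's element, roughly:
 *
 *      initialise_pacas();
 *      local_paca = &paca[boot_cpuid];         // what early setup does,
 *                                              // give or take the details
 */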

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PACA_H */