arm64: smp: add missing completion for secondary boot
[GitHub/mt8127/android_kernel_alcatel_ttab.git] arch/arm64/include/asm/processor.h
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

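/*
 * Explanatory note (not part of the original header): current_text_addr()
 * relies on two GNU C extensions -- a local label (__label__) and the
 * "labels as values" operator (&&) -- so the statement expression
 * evaluates to the address of the point where the macro is expanded,
 * i.e. an approximation of the caller's program counter. A hypothetical
 * use might look like:
 *
 *	void *pc = current_text_addr();
 *	pr_debug("executing near %p\n", pc);
 */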
#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>

#ifdef __KERNEL__
#define STACK_TOP_MAX		TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

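/*
 * Explanatory note (not part of the original header): STACK_TOP is the
 * highest user-space address at which a new process stack may be placed.
 * Native 64-bit tasks use STACK_TOP_MAX (the top of the 64-bit task
 * address space), while AArch32 compat tasks are capped at 0xffff0000 so
 * that the stack stays below the legacy 32-bit vectors page mapped at
 * AARCH32_VECTORS_BASE.
 */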
struct debug_info {
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
};

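/*
 * Explanatory note (not part of the original header): the hbp_break[] and
 * hbp_watch[] slots hold the perf_event objects backing hardware
 * breakpoints/watchpoints installed on this task (typically by a ptrace
 * debugger). ARM_MAX_BRP and ARM_MAX_WRP are the architectural maximum
 * numbers of breakpoint and watchpoint registers, defined in
 * <asm/hw_breakpoint.h>.
 */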
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

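/*
 * Explanatory note (not part of the original header): cpu_context holds
 * only the registers that the AArch64 procedure call standard (AAPCS64)
 * requires a callee to preserve -- x19-x28, the frame pointer, the stack
 * pointer and a resume pc. cpu_switch_to() (declared below) saves the
 * outgoing task's values here and loads the incoming task's copy; the
 * caller-saved registers need no switching because a context switch only
 * ever happens through a function call.
 */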
struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */
	unsigned long		tp_value;
	struct fpsimd_state	fpsimd_state;
	unsigned long		fault_address;	/* fault info */
	struct debug_info	debug;		/* debugging */
};

#define INIT_THREAD  { }

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	regs->syscallno = ~0UL;
	regs->pc = pc;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = COMPAT_PSR_MODE_USR;
	if (pc & 1)
		regs->pstate |= COMPAT_PSR_T_BIT;
	regs->compat_sp = sp;
}
#endif

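/*
 * Illustrative sketch (not part of the original header; elf_entry and
 * user_stack_top are placeholder names): a binary-format loader would set
 * up the initial user register state roughly like this once it has mapped
 * the executable and built the user stack:
 *
 *	struct pt_regs *regs = current_pt_regs();
 *
 *	if (is_compat_task())
 *		compat_start_thread(regs, elf_entry, user_stack_top);
 *	else
 *		start_thread(regs, elf_entry, user_stack_top);
 *
 * start_thread() zeroes the register frame, selects EL0t (64-bit user
 * mode) and points pc/sp at the new image; the compat variant selects
 * AArch32 user mode and sets the Thumb bit when the entry point has
 * bit 0 set.
 */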
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

unsigned long get_wchan(struct task_struct *p);

#define cpu_relax()			barrier()

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

/*
 * Create a new kernel thread
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->sp

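/*
 * Explanatory note (not part of the original header): a task's saved user
 * register frame lives at the very top of its kernel stack, so
 * task_pt_regs() computes "stack base + THREAD_START_SP" and steps back
 * one struct pt_regs. KSTK_EIP()/KSTK_ESP() simply read the user pc and
 * sp out of that frame, e.g. for /proc/<pid>/stat.
 */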
/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
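
/*
 * Explanatory note (not part of the original header): these helpers map
 * directly to the hardware PRFM prefetch instruction -- "pldl1keep"
 * requests a prefetch for a load into the L1 cache with the temporal
 * ("keep") policy, while "pstl1keep" prefetches with intent to store,
 * which is why prefetchw() and spin_lock_prefetch() (which expects to take
 * ownership of the lock's cache line) use it. Both are hints only and have
 * no architectural effect on correctness.
 */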

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

#endif /* __ASM_PROCESSOR_H */