/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <asm/cacheflush.h>
24 #include <asm/irqflags.h>
26 #include <asm/tlbflush.h>
27 #include <linux/init.h>
30 #include <linux/rkp.h>
31 #include <linux/vmm.h>
/* SMC function IDs used to trap to the EL3 monitor; 32-bit and 64-bit
 * SMC calling-convention variants (values are vendor-firmware specific). */
34 #define VMM_32BIT_SMC_CALL_MAGIC 0x82000400
35 #define VMM_64BIT_SMC_CALL_MAGIC 0xC2000400
/* Stack offset handed to _vmm_goto_EL2() when entering EL2. */
37 #define VMM_STACK_OFFSET 0x2000
/* Execution-state selector argument for _vmm_goto_EL2(). */
39 #define VMM_MODE_AARCH32 0
40 #define VMM_MODE_AARCH64 1
/* NOTE(review): the declarations below look like the field list of
 * struct vmm_elf_info (base/text_head/bss VAs plus their sizes, matching
 * the member accesses in vmm_entry()/vmm_init()); the enclosing
 * "struct vmm_elf_info {" / "};" lines appear to have been lost in
 * extraction -- confirm against the original file. */
43 void *base
, *text_head
, *bss
; //VA only
44 size_t size
, text_head_size
, bss_size
;
/* Symbol whose physical address is used as the EL2 entry point by
 * vmm_disable() below -- presumably defined in assembly elsewhere. */
49 extern char _vmm_disable
;
/* Boot-time-only staging pointer/size for the vmm image
 * (__initdata: backing memory is discarded after kernel init). */
51 char * __initdata vmm
;
52 size_t __initdata vmm_size
;
54 int __init
vmm_disable(void)
/* Disable the hypervisor: issue the 64-bit SMC and direct EL2 to the
 * physical address of _vmm_disable, with no memory-region argument
 * (NULL, 0).
 * NOTE(review): the function's braces and return statement appear to
 * have been lost in extraction -- the visible body is only the SMC call
 * plus a log line; confirm against the original file. */
56 _vmm_goto_EL2(VMM_64BIT_SMC_CALL_MAGIC
, (void *)virt_to_phys(&_vmm_disable
),
57 VMM_STACK_OFFSET
, VMM_MODE_AARCH64
, NULL
, 0);
/* Trace that the disable path ran. */
59 RKP_LOGA("%s\n", __FUNCTION__
);
63 int __init
vmm_entry(struct vmm_elf_info
*vei
)
/* Launch the relocated VMM image described by @vei:
66 * 1. get entry point pa. (.text.head section)
67 * 2. ask el3 to bring us there.
 */
/* Physical address of the image's .text.head section -- the monitor
 * needs a PA, not a kernel VA. */
71 void *entry
= (void *)virt_to_phys(vei
->text_head
);
76 RKP_LOGA("entry point=%p\n", entry
);
/* SMC to EL3: start execution at `entry` in AArch64 state, passing the
 * reserved region (RKP_VMM_START / RKP_VMM_SIZE) as the final arguments.
 * NOTE(review): `status` has no visible declaration in this view --
 * presumably an `int status;` line was lost in extraction, along with
 * the function's braces and final return; confirm against the original. */
77 status
= _vmm_goto_EL2(VMM_64BIT_SMC_CALL_MAGIC
, entry
,
78 VMM_STACK_OFFSET
, VMM_MODE_AARCH64
,
79 (void *)RKP_VMM_START
, RKP_VMM_SIZE
);
81 RKP_LOGA("status=%d\n", status
);
85 int __init
vmm_init(void)
/* Stage and launch the VMM ELF image embedded in the kernel image:
88 * 1. copy vmm.elf from kimage to reserved area.
89 * 2. wipe out vmm.elf on kimage.
90 * 3. get .bss and .text.head section.
91 * 4. zero out .bss on coped vmm.elf
 *
 * NOTE(review): several lines of this function (the declaration of
 * `ret`, parts of the two struct initializers, the error-branch bodies
 * and the final return) appear to have been lost in extraction; the
 * comments below describe only what is visible. */
/* Destination descriptor: the reserved physical region, viewed through
 * its kernel VA. (Its .size initializer is not visible in this view.) */
96 struct vmm_elf_info vmm_reserved
= {
97 .base
= (void *)phys_to_virt(RKP_VMM_START
),
/* Source descriptor: the vmm.elf embedded between the linker symbols
 * _svmm and _evmm.
 * NOTE(review): vmm_kimage's .base initializer (presumably &_svmm) is
 * not visible here -- confirm against the original file. */
100 struct vmm_elf_info vmm_kimage
= {
102 .size
= (size_t)(&_evmm
- &_svmm
)
105 /* copy elf to reserved area and terminate one on kimage */
/* Refuse to continue if the embedded image does not fit the reserved
 * region. */
106 BUG_ON(vmm_kimage
.size
> vmm_reserved
.size
);
107 memcpy(vmm_reserved
.base
, vmm_kimage
.base
, vmm_kimage
.size
);
/* Wipe the copy that remains inside the kernel image. */
108 memset(vmm_kimage
.base
, 0, vmm_kimage
.size
);
110 /* get .bss and .text.head info*/
/* ld_get_sect() locates a named ELF section in the copied image;
 * a non-zero return takes the error branch (branch bodies truncated
 * in this view). */
111 if (ld_get_sect(vmm_reserved
.base
, ".bss", &vmm_reserved
.bss
, &vmm_reserved
.bss_size
)) {
112 RKP_LOGA("Can't fine .bss section from vmm_reserved.base=%p\n",
117 if (ld_get_sect(vmm_reserved
.base
, ".text.head",
118 &vmm_reserved
.text_head
, &vmm_reserved
.text_head_size
)) {
119 RKP_LOGA("Can't find .text.head section from vmm_reserved.base=%p\n",
/* Zero the relocated image's .bss before handing it over. */
125 memset(vmm_reserved
.bss
, 0, vmm_reserved
.bss_size
);
/* Debug dump of both image descriptors (the argument list is partially
 * truncated in this view). */
128 RKP_LOGA("vmm_reserved\n"
129 " .base=%p .size=%lu\n"
130 " .bss=%p .bss_size=%lu\n"
131 " .text_head=%p .text_head_size=%lu\n",
135 vmm_reserved
.bss_size
,
136 vmm_reserved
.text_head
,
137 vmm_reserved
.text_head_size
);
138 RKP_LOGA("vmm_kimage\n"
139 ".base=%p .size=%lu\n",
140 vmm_kimage
.base
, vmm_kimage
.size
);
141 RKP_LOGA("vmm_start=%x, vmm_size=%lu\n", RKP_VMM_START
, RKP_VMM_SIZE
);
/* Hand off to the hypervisor entry path.
 * NOTE(review): `ret` has no visible declaration, and the function's
 * final return is not visible here -- confirm against the original. */
144 ret
= vmm_entry(&vmm_reserved
);
/* NOTE(review): vmm_init is marked __init (its text is freed after boot),
 * so exporting it to modules is questionable -- confirm the intent. */
149 EXPORT_SYMBOL(vmm_init
);