FROMLIST: [PATCH v5 08/12] arm: vdso: Add ARCH_CLOCK_FIXED_MASK
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / init / main.c
1 /*
2 * linux/init/main.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * GK 2/5/95 - Changed to support mounting root fs via NFS
7 * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
8 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
9 * Simplified starting of init: Michael A. Griffith <grif@acm.org>
10 */
11
12 #define DEBUG /* Enable initcall_debug */
13
14 #include <linux/types.h>
15 #include <linux/module.h>
16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h>
18 #include <linux/syscalls.h>
19 #include <linux/stackprotector.h>
20 #include <linux/string.h>
21 #include <linux/ctype.h>
22 #include <linux/delay.h>
23 #include <linux/ioport.h>
24 #include <linux/init.h>
25 #include <linux/initrd.h>
26 #include <linux/bootmem.h>
27 #include <linux/acpi.h>
28 #include <linux/tty.h>
29 #include <linux/percpu.h>
30 #include <linux/kmod.h>
31 #include <linux/vmalloc.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/start_kernel.h>
34 #include <linux/security.h>
35 #include <linux/smp.h>
36 #include <linux/profile.h>
37 #include <linux/rcupdate.h>
38 #include <linux/moduleparam.h>
39 #include <linux/kallsyms.h>
40 #include <linux/writeback.h>
41 #include <linux/cpu.h>
42 #include <linux/cpuset.h>
43 #include <linux/cgroup.h>
44 #include <linux/efi.h>
45 #include <linux/tick.h>
46 #include <linux/interrupt.h>
47 #include <linux/taskstats_kern.h>
48 #include <linux/delayacct.h>
49 #include <linux/unistd.h>
50 #include <linux/rmap.h>
51 #include <linux/mempolicy.h>
52 #include <linux/key.h>
53 #include <linux/buffer_head.h>
54 #include <linux/page_ext.h>
55 #include <linux/debug_locks.h>
56 #include <linux/debugobjects.h>
57 #include <linux/lockdep.h>
58 #include <linux/kmemleak.h>
59 #include <linux/pid_namespace.h>
60 #include <linux/device.h>
61 #include <linux/kthread.h>
62 #include <linux/sched.h>
63 #include <linux/signal.h>
64 #include <linux/idr.h>
65 #include <linux/kgdb.h>
66 #include <linux/ftrace.h>
67 #include <linux/async.h>
68 #include <linux/kmemcheck.h>
69 #include <linux/sfi.h>
70 #include <linux/shmem_fs.h>
71 #include <linux/slab.h>
72 #include <linux/perf_event.h>
73 #include <linux/file.h>
74 #include <linux/ptrace.h>
75 #include <linux/blkdev.h>
76 #include <linux/elevator.h>
77 #include <linux/sched_clock.h>
78 #include <linux/context_tracking.h>
79 #include <linux/random.h>
80 #include <linux/list.h>
81 #include <linux/integrity.h>
82 #include <linux/proc_ns.h>
83 #include <linux/io.h>
84 #include <linux/kaiser.h>
85
86 #include <asm/io.h>
87 #include <asm/bugs.h>
88 #include <asm/setup.h>
89 #include <asm/sections.h>
90 #include <asm/cacheflush.h>
91
92 #ifdef CONFIG_SEC_EXT
93 #include <linux/sec_ext.h>
94 #endif
95 #ifdef CONFIG_RKP
96 #include <linux/vmm.h>
97 #include <linux/rkp.h>
98 #endif //CONFIG_RKP
99 #ifdef CONFIG_RELOCATABLE_KERNEL
100 #include <linux/memblock.h>
101 #endif
102 static int kernel_init(void *);
103
104 extern void init_IRQ(void);
105 extern void fork_init(void);
106 extern void radix_tree_init(void);
107
108 #ifdef CONFIG_RKP
109 extern struct vm_struct *vmlist;
110 #endif
111
112 /*
113 * Debug helper: via this flag we know that we are in 'early bootup code'
114 * where only the boot processor is running with IRQ disabled. This means
115 * two things - IRQ must not be enabled before the flag is cleared and some
116 * operations which are not allowed with IRQ disabled are allowed while the
117 * flag is set.
118 */
119 bool early_boot_irqs_disabled __read_mostly;
120
121 enum system_states system_state __read_mostly;
122 EXPORT_SYMBOL(system_state);
123
124 /*
125 * Boot command-line arguments
126 */
127 #define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
128 #define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
129
130 extern void time_init(void);
131 /* Default late time init is NULL. archs can override this later. */
132 void (*__initdata late_time_init)(void);
133
134 /* Untouched command line saved by arch-specific code. */
135 char __initdata boot_command_line[COMMAND_LINE_SIZE];
136 /* Untouched saved command line (eg. for /proc) */
137 char *saved_command_line;
138 /* Command line for parameter parsing */
139 static char *static_command_line;
140 /* Command line for per-initcall parameter parsing */
141 static char *initcall_command_line;
142
143 static char *execute_command;
144 static char *ramdisk_execute_command;
145
146 /*
147 * Used to generate warnings if static_key manipulation functions are used
148 * before jump_label_init is called.
149 */
150 bool static_key_initialized __read_mostly;
151 EXPORT_SYMBOL_GPL(static_key_initialized);
152
153 /*
154 * If set, this is an indication to the drivers that reset the underlying
155 * device before going ahead with the initialization otherwise driver might
156 * rely on the BIOS and skip the reset operation.
157 *
158 * This is useful if kernel is booting in an unreliable environment.
159 * For ex. kdump situation where previous kernel has crashed, BIOS has been
160 * skipped and devices will be in unknown state.
161 */
162 unsigned int reset_devices;
163 EXPORT_SYMBOL(reset_devices);
164
165 int ddr_start_type = 0;
166
167 static int __init set_reset_devices(char *str)
168 {
169 reset_devices = 1;
170 return 1;
171 }
172
173 __setup("reset_devices", set_reset_devices);
#ifdef CONFIG_RELOCATABLE_KERNEL
static unsigned long kaslr_mem __initdata;
static unsigned long kaslr_size __initdata;

/*
 * "kaslr_region=<size>@<base>" boot parameter: reserve the memblock
 * region the relocated kernel image may occupy.
 *
 * Fix: the original reserved memory even when the "@<base>" part was
 * missing, in which case kaslr_mem was still 0 and memblock_reserve()
 * would reserve <size> bytes starting at physical address 0.  Reject
 * malformed or zero-sized specifications instead.
 */
static int __init set_kaslr_region(char *str)
{
	char *endp;

	kaslr_size = memparse(str, &endp);
	if (*endp == '@')
		kaslr_mem = memparse(endp + 1, NULL);

	if (!kaslr_mem || !kaslr_size) {
		pr_err("%s: malformed option, expected <size>@<base>\n",
		       __func__);
		return -EINVAL;
	}

	if (memblock_reserve(kaslr_mem, kaslr_size)) {
		pr_err("%s: failed reserving size %lx " \
				"at base 0x%lx\n", __func__, kaslr_size, kaslr_mem);
		return -1;
	}
	pr_info("kaslr :%s, base:%lx, size:%lx \n", __func__, kaslr_mem, kaslr_size);
	return 0;
}
__setup("kaslr_region=", set_kaslr_region);
#endif
195
196 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
197 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
198 static const char *panic_later, *panic_param;
199
200 extern const struct obs_kernel_param __setup_start[], __setup_end[];
201
/*
 * Match @line against every __setup() entry.  Returns non-zero when
 * the option was consumed here (or already consumed as an early
 * param), so the caller does not also hand it to init.
 */
static int __init obsolete_checksetup(char *line)
{
	const struct obs_kernel_param *p;
	int had_early_param = 0;

	p = __setup_start;
	do {
		int n = strlen(p->str);
		if (parameqn(line, p->str, n)) {
			if (p->early) {
				/* Already done in parse_early_param?
				 * (Needs exact match on param part).
				 * Keep iterating, as we can have early
				 * params and __setups of same names 8( */
				if (line[n] == '\0' || line[n] == '=')
					had_early_param = 1;
			} else if (!p->setup_func) {
				pr_warn("Parameter %s is obsolete, ignored\n",
					p->str);
				return 1;
			} else if (p->setup_func(line + n))
				return 1;
		}
		p++;
	} while (p < __setup_end);

	return had_early_param;
}
230
231 /*
232 * This should be approx 2 Bo*oMips to start (note initial shift), and will
233 * still work even if initially too large, it will just take slightly longer
234 */
235 unsigned long loops_per_jiffy = (1<<12);
236 EXPORT_SYMBOL(loops_per_jiffy);
237
238 static int __init debug_kernel(char *str)
239 {
240 console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
241 return 0;
242 }
243
244 static int __init quiet_kernel(char *str)
245 {
246 console_loglevel = CONSOLE_LOGLEVEL_QUIET;
247 return 0;
248 }
249
250 early_param("debug", debug_kernel);
251 early_param("quiet", quiet_kernel);
252
253 static int __init loglevel(char *str)
254 {
255 int newlevel;
256
257 /*
258 * Only update loglevel value when a correct setting was passed,
259 * to prevent blind crashes (when loglevel being set to 0) that
260 * are quite hard to debug
261 */
262 if (get_option(&str, &newlevel)) {
263 console_loglevel = newlevel;
264 return 0;
265 }
266
267 return -EINVAL;
268 }
269
270 early_param("loglevel", loglevel);
271
272 /* Change NUL term back to "=", to make "param" the whole string. */
273 static int __init repair_env_string(char *param, char *val,
274 const char *unused, void *arg)
275 {
276 if (val) {
277 /* param=val or param="val"? */
278 if (val == param+strlen(param)+1)
279 val[-1] = '=';
280 else if (val == param+strlen(param)+2) {
281 val[-2] = '=';
282 memmove(val-1, val, strlen(val)+1);
283 val--;
284 } else
285 BUG();
286 }
287 return 0;
288 }
289
290 /* Anything after -- gets handed straight to init. */
291 static int __init set_init_arg(char *param, char *val,
292 const char *unused, void *arg)
293 {
294 unsigned int i;
295
296 if (panic_later)
297 return 0;
298
299 repair_env_string(param, val, unused, NULL);
300
301 for (i = 0; argv_init[i]; i++) {
302 if (i == MAX_INIT_ARGS) {
303 panic_later = "init";
304 panic_param = param;
305 return 0;
306 }
307 }
308 argv_init[i] = param;
309 return 0;
310 }
311
/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val,
				     const char *unused, void *arg)
{
	/* Undo the NUL that parse_args() wrote, making "param" whole again. */
	repair_env_string(param, val, unused, NULL);

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param))
		return 0;

	/* Unused module parameter. */
	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
		return 0;

	if (panic_later)
		return 0;

	if (val) {
		/* Environment option */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "env";
				panic_param = param;
			}
			/* Same variable name already set? Replace it below. */
			if (!strncmp(param, envp_init[i], val - param))
				break;
		}
		envp_init[i] = param;
	} else {
		/* Command line option */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "init";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
357
358 static int __init init_setup(char *str)
359 {
360 unsigned int i;
361
362 execute_command = str;
363 /*
364 * In case LILO is going to boot us with default command line,
365 * it prepends "auto" before the whole cmdline which makes
366 * the shell think it should execute a script with such name.
367 * So we ignore all arguments entered _before_ init=... [MJ]
368 */
369 for (i = 1; i < MAX_INIT_ARGS; i++)
370 argv_init[i] = NULL;
371 return 1;
372 }
373 __setup("init=", init_setup);
374
375 static int __init rdinit_setup(char *str)
376 {
377 unsigned int i;
378
379 ramdisk_execute_command = str;
380 /* See "auto" comment in init_setup */
381 for (i = 1; i < MAX_INIT_ARGS; i++)
382 argv_init[i] = NULL;
383 return 1;
384 }
385 __setup("rdinit=", rdinit_setup);
386
#ifndef CONFIG_SMP
/* UP build: only one CPU exists, so the SMP setup hooks are no-ops. */
static const unsigned int setup_max_cpus = NR_CPUS;
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#endif
392
393 /*
394 * We need to store the untouched command line for future reference.
395 * We also need to store the touched command line since the parameter
396 * parsing is performed in place, and we should allow a component to
397 * store reference of name/value for future reference.
398 */
399 static void __init setup_command_line(char *command_line)
400 {
401 saved_command_line =
402 memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
403 initcall_command_line =
404 memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
405 static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0);
406 strcpy(saved_command_line, boot_command_line);
407 strcpy(static_command_line, command_line);
408 }
409
410 /*
411 * We need to finalize in a non-__init function or else race conditions
412 * between the root thread and the init thread may cause start_kernel to
413 * be reaped by free_initmem before the root thread has proceeded to
414 * cpu_idle.
415 *
416 * gcc-3.4 accidentally inlines this function, so use noinline.
417 */
418
/* Signalled once kthreadd exists; kernel_init_freeable() waits on it. */
static __initdata DECLARE_COMPLETION(kthreadd_done);

static noinline void __init_refok rest_init(void)
{
	int pid;

	rcu_scheduler_starting();
	smpboot_thread_init();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	kernel_thread(kernel_init, NULL, CLONE_FS);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();
	/* Let kernel_init proceed: kthreadd is now available. */
	complete(&kthreadd_done);

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	init_idle_bootup_task(current);
	schedule_preempt_disabled();
	/* Call into cpu_idle with preempt disabled */
	cpu_startup_entry(CPUHP_ONLINE);
}
449
#ifdef CONFIG_RKP_KDP
/* Set from "bootmode=2" in do_early_param().  RKP_RO_AREA presumably
 * places it in hypervisor-protected read-only memory — confirm in rkp.h. */
RKP_RO_AREA int is_boot_recovery = 0;
#endif
453
454 /* Check for early params. */
455 static int __init do_early_param(char *param, char *val,
456 const char *unused, void *arg)
457 {
458 const struct obs_kernel_param *p;
459
460 for (p = __setup_start; p < __setup_end; p++) {
461 if ((p->early && parameq(param, p->str)) ||
462 (strcmp(param, "console") == 0 &&
463 strcmp(p->str, "earlycon") == 0)
464 ) {
465 if (p->setup_func(val) != 0)
466 pr_warn("Malformed early option '%s'\n", param);
467 }
468 }
469 /* We accept everything at this stage. */
470 #ifdef CONFIG_KNOX_KAP
471 if ((strncmp(param, "androidboot.security_mode", 26) == 0)) {
472 pr_warn("val = %d\n",*val);
473 if ((strncmp(val, "1526595585", 10) == 0)) {
474 pr_info("Security Boot Mode \n");
475 }
476 }
477
478 #endif
479 #ifdef CONFIG_RKP_KDP
480 if ((strncmp(param, "bootmode", 9) == 0)) {
481 //printk("\n RKP22 In Recovery Mode= %d\n",*val);
482 if ((strncmp(val, "2", 2) == 0)) {
483 is_boot_recovery = 1;
484 }
485 }
486 #endif
487 return 0;
488 }
489
490 void __init parse_early_options(char *cmdline)
491 {
492 parse_args("early options", cmdline, NULL, 0, 0, 0, NULL,
493 do_early_param);
494 }
495
496 /* Arch code calls this early on, or if not, just before other parsing. */
497 void __init parse_early_param(void)
498 {
499 static int done __initdata;
500 static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
501
502 if (done)
503 return;
504
505 /* All fall through to do_early_param. */
506 strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
507 parse_early_options(tmp_cmdline);
508 done = 1;
509 }
510
511 /*
512 * Activate the first processor.
513 */
514
515 static void __init boot_cpu_init(void)
516 {
517 int cpu = smp_processor_id();
518 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
519 set_cpu_online(cpu, true);
520 set_cpu_active(cpu, true);
521 set_cpu_present(cpu, true);
522 set_cpu_possible(cpu, true);
523 }
524
525 void __init __weak smp_setup_processor_id(void)
526 {
527 }
528
# if THREAD_SIZE >= PAGE_SIZE
/* Weak no-op default, overridden where a thread-stack cache is needed. */
void __init __weak thread_stack_cache_init(void)
{
}
#endif
534
/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
	/*
	 * page_ext requires contiguous pages,
	 * bigger than MAX_ORDER unless SPARSEMEM.
	 */
	page_ext_init_flatmem();
	mem_init();
	kmem_cache_init();
	percpu_init_late();
	pgtable_init();
	vmalloc_init();
	ioremap_huge_init();
	/* KAISER/KPTI shadow mappings, set up once the allocators are live. */
	kaiser_init();
}
#ifdef CONFIG_RKP

/*
 * Bitmap storage handed to the RKP hypervisor; the section names
 * suggest they track page-table pages and double mappings.  Sized by
 * RAM configuration (6GB vs. smaller builds) — confirm against rkp.h.
 */
#ifdef CONFIG_RKP_6G
__attribute__((section(".rkp.bitmap"))) u8 rkp_pgt_bitmap_arr[0x30000] = {0};
__attribute__((section(".rkp.dblmap"))) u8 rkp_map_bitmap_arr[0x30000] = {0};
#else
__attribute__((section(".rkp.bitmap"))) u8 rkp_pgt_bitmap_arr[0x20000] = {0};
__attribute__((section(".rkp.dblmap"))) u8 rkp_map_bitmap_arr[0x20000] = {0};
#endif

/* Non-zero once RKP_INIT has been issued to the hypervisor. */
u8 rkp_started = 0;

/*
 * Describe the kernel's memory layout (page-table roots, text/rodata
 * bounds, bitmaps, extra memory) to the RKP hypervisor via RKP_INIT.
 */
static void __init rkp_init(void)
{
	rkp_init_t init;
	struct vm_struct *p;

	init.magic = RKP_INIT_MAGIC;
	init.vmalloc_start = (u64)VMALLOC_START;
	//init.vmalloc_end = (u64)high_memory;
	init.vmalloc_end = (u64)VMALLOC_END;
	printk("in rkp_init, swapper_pg_dir : %llx\n", (unsigned long long)swapper_pg_dir);
	init.init_mm_pgd = (u64)__pa(swapper_pg_dir);
	init.id_map_pgd = (u64)__pa(idmap_pg_dir);
	init.zero_pg_addr = __pa(empty_zero_page);
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	init.tramp_pgd = __pa(tramp_pg_dir);
#endif
	init.rkp_pgt_bitmap = (u64)__pa(rkp_pgt_bitmap);
	init.rkp_dbl_bitmap = (u64)__pa(rkp_map_bitmap);
	init.rkp_bitmap_size = RKP_PGT_BITMAP_LEN;
	init._text = (u64) _text;
	init._etext = (u64) _etext;
	init.extra_memory_addr = RKP_EXTRA_MEM_START;
	init.extra_memory_size = RKP_EXTRA_MEM_SIZE;
	init._srodata = (u64) __start_rodata;
	init._erodata =(u64) __end_rodata;
	init.large_memory = 0;
	//init.fimc_phys_addr = (u64)page_to_phys(vmalloc_to_page((void *)FIMC_LIB_START_VA));
	init.fimc_phys_addr = 0;
	/* Look up the FIMC library mapping's physical address, if mapped. */
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void *)FIMC_LIB_START_VA) {
			init.fimc_phys_addr = (u64)(p->phys_addr);
			break;
		}
	}
	init.fimc_size = FIMC_LIB_SIZE;

	rkp_call(RKP_INIT, (u64)&init, (u64)kimage_voffset, 0, 0, 0);
	//rkp_call(RKP_INIT, (u64)&init, 0, 0, 0, 0);
	rkp_started = 1;
	return;
}
#endif
607
#ifdef CONFIG_RKP_KDP
/*
 * Report the sizes and field offsets of struct cred, struct
 * task_struct and struct thread_info to the hypervisor (RKP command
 * 0x40), presumably so it can validate credential structures from
 * EL2 — confirm against the RKP documentation.
 */
static void __init kdp_init(void)
{
	kdp_init_t cred;

	cred.credSize = sizeof(struct cred);
	cred.sp_size = rkp_get_task_sec_size();
	cred.pgd_mm = offsetof(struct mm_struct,pgd);
	cred.uid_cred = offsetof(struct cred,uid);
	cred.euid_cred = offsetof(struct cred,euid);
	cred.gid_cred = offsetof(struct cred,gid);
	cred.egid_cred = offsetof(struct cred,egid);

	cred.bp_pgd_cred = offsetof(struct cred,bp_pgd);
	cred.bp_task_cred = offsetof(struct cred,bp_task);
	cred.type_cred = offsetof(struct cred,type);
	cred.security_cred = offsetof(struct cred,security);
	cred.usage_cred = offsetof(struct cred,use_cnt);

	cred.cred_task = offsetof(struct task_struct,cred);
	cred.mm_task = offsetof(struct task_struct,mm);
	cred.pid_task = offsetof(struct task_struct,pid);
	cred.rp_task = offsetof(struct task_struct,real_parent);
	cred.comm_task = offsetof(struct task_struct,comm);

	cred.bp_cred_secptr = rkp_get_offset_bp_cred();

	cred.task_threadinfo = offsetof(struct thread_info,task);
	rkp_call(RKP_CMDID(0x40),(u64)&cred,0,0,0,0);
}
#endif /*CONFIG_RKP_KDP*/
639
640
asmlinkage __visible void __init start_kernel(void)
{
	char *command_line;
	char *after_dashes;

	/*
	 * Need to run as early as possible, to initialize the
	 * lockdep hash:
	 */
	lockdep_init();
	set_task_stack_end_magic(&init_task);
	smp_setup_processor_id();
	debug_objects_early_init();

	/*
	 * Set up the the initial canary ASAP:
	 */
	boot_init_stack_canary();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_disabled = true;

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them
	 */
	boot_cpu_init();
	page_address_init();
	pr_notice("%s", linux_banner);
	setup_arch(&command_line);
	mm_init_cpumask(&init_mm);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */

	build_all_zonelists(NULL, NULL);
	page_alloc_init();

	pr_notice("Kernel command line: %s\n", boot_command_line);
	parse_early_param();
	after_dashes = parse_args("Booting kernel",
				  static_command_line, __start___param,
				  __stop___param - __start___param,
				  -1, -1, NULL, &unknown_bootoption);
	/* Everything after "--" goes straight to init's argv. */
	if (!IS_ERR_OR_NULL(after_dashes))
		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
			   NULL, set_init_arg);

	jump_label_init();

	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	setup_log_buf(0);
	pidhash_init();
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
#ifdef CONFIG_RKP
	/* Carve out RKP's memory before the allocators can claim it. */
	rkp_reserve_mem();
#endif
	mm_init();
#ifdef CONFIG_RKP
	/* Bring up the vendor hypervisor and register the kernel layout. */
	vmm_init();
	rkp_init();

#if !defined(CONFIG_USE_SIGNED_BINARY)
	rkp_call(RKP_NOSHIP_BIN, 0, 0, 0, 0, 0);
#endif

#ifdef CONFIG_RKP_DEBUG
	rkp_call(RKP_DEBUG, 0, 0, 0, 0, 0);
#endif

#ifdef CONFIG_RELOCATABLE_KERNEL
	/* Tell RKP about the region reserved via "kaslr_region=". */
	rkp_call(KASLR_MEM_RESERVE, kaslr_mem, kaslr_size, 0, 0, 0);
#endif

#ifdef CONFIG_RKP_KDP
	rkp_cred_enable = 1;
#endif /*CONFIG_RKP_KDP*/
#endif //CONFIG_RKP

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();
	/*
	 * Disable preemption - early bootup scheduling is extremely
	 * fragile until we cpu_idle() for the first time.
	 */
	preempt_disable();
	if (WARN(!irqs_disabled(),
		 "Interrupts were enabled *very* early, fixing it\n"))
		local_irq_disable();
	idr_init_cache();
	rcu_init();

	/* trace_printk() and trace points may be used after this */
	trace_init();

	context_tracking_init();
	radix_tree_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	tick_init();
	rcu_init_nohz();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();
	sched_clock_postinit();
	perf_event_init();
	profile_init();
	call_function_init();
	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
	early_boot_irqs_disabled = false;
	local_irq_enable();

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later)
		panic("Too many boot %s vars at `%s'", panic_later,
		      panic_param);

	lockdep_info();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok &&
	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
		    page_to_pfn(virt_to_page((void *)initrd_start)),
		    min_low_pfn);
		initrd_start = 0;
	}
#endif
	page_ext_init();
	debug_objects_mem_init();
	kmemleak_init();
	setup_per_cpu_pageset();
	numa_policy_init();
	if (late_time_init)
		late_time_init();
	sched_clock_init();
	calibrate_delay();
	pidmap_init();
	anon_vma_init();
	acpi_early_init();
#ifdef CONFIG_X86
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_enter_virtual_mode();
#endif
#ifdef CONFIG_X86_ESPFIX64
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
#endif
	thread_stack_cache_init();
#ifdef CONFIG_RKP_KDP
	/* Register cred/task field offsets with RKP (see kdp_init()). */
	if (rkp_cred_enable)
		kdp_init();
#endif /*CONFIG_RKP_KDP*/
	cred_init();
	fork_init();
	proc_caches_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init();
	signals_init();
	/* rootfs populating might need page-writeback */
	page_writeback_init();
	proc_root_init();
	nsfs_init();
	cpuset_init();
	cgroup_init();
	taskstats_init_early();
	delayacct_init();

	check_bugs();

	acpi_subsystem_init();
	sfi_init_late();

	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
		efi_late_init();
		efi_free_boot_services();
	}

	ftrace_init();

	/* Do the rest non-__init'ed, we're now alive */
	rest_init();
}
856
857 /* Call all constructor functions linked into the kernel. */
858 static void __init do_ctors(void)
859 {
860 #ifdef CONFIG_CONSTRUCTORS
861 ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
862
863 for (; fn < (ctor_fn_t *) __ctors_end; fn++)
864 (*fn)();
865 #endif
866 }
867
/* "initcall_debug" parameter: log each initcall and how long it took. */
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
870
#ifdef CONFIG_KALLSYMS
/* One function name parsed from the "initcall_blacklist=" parameter. */
struct blacklist_entry {
	struct list_head next;
	char *buf;
};

static __initdata_or_module LIST_HEAD(blacklisted_initcalls);

static int __init initcall_blacklist(char *str)
{
	char *str_entry;
	struct blacklist_entry *entry;

	/* str argument is a comma-separated list of functions */
	do {
		str_entry = strsep(&str, ",");
		if (str_entry) {
			pr_debug("blacklisting initcall %s\n", str_entry);
			/* alloc_bootmem() panics on failure, so no NULL checks. */
			entry = alloc_bootmem(sizeof(*entry));
			entry->buf = alloc_bootmem(strlen(str_entry) + 1);
			strcpy(entry->buf, str_entry);
			list_add(&entry->next, &blacklisted_initcalls);
		}
	} while (str_entry);

	return 0;
}
898
899 static bool __init_or_module initcall_blacklisted(initcall_t fn)
900 {
901 struct list_head *tmp;
902 struct blacklist_entry *entry;
903 char *fn_name;
904
905 fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
906 if (!fn_name)
907 return false;
908
909 list_for_each(tmp, &blacklisted_initcalls) {
910 entry = list_entry(tmp, struct blacklist_entry, next);
911 if (!strcmp(fn_name, entry->buf)) {
912 pr_debug("initcall %s blacklisted\n", fn_name);
913 kfree(fn_name);
914 return true;
915 }
916 }
917
918 kfree(fn_name);
919 return false;
920 }
#else
/* Without kallsyms we cannot resolve names; the blacklist is a no-op. */
static int __init initcall_blacklist(char *str)
{
	pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
	return 0;
}

static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
	return false;
}
#endif
__setup("initcall_blacklist=", initcall_blacklist);
934
/*
 * Run @fn, timing it, and (when initcall_debug is set) log entry/exit.
 * The duration is ktime ns >> 10, i.e. approximately microseconds.
 */
static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	int ret;

	if (initcall_debug)
		printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current));
	calltime = ktime_get();
	ret = fn();
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
	if (initcall_debug)
		printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n",
			 fn, ret, duration);

#ifdef CONFIG_SEC_INITCALL_DEBUG
	/* Samsung hook: record slow initcalls for boot-time analysis. */
	if (SEC_INITCALL_DEBUG_MIN_TIME < duration)
		sec_initcall_debug_add(fn, duration);
#endif

	return ret;
}
959
/*
 * Run a single initcall, honouring the blacklist, and warn (then
 * repair) if it returned with an unbalanced preempt count or with
 * interrupts disabled.
 */
int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	int ret;
	char msgbuf[64];

	if (initcall_blacklisted(fn))
		return -EPERM;

#ifdef CONFIG_SEC_INITCALL_DEBUG
	/* Samsung builds always time initcalls, not only with initcall_debug. */
	ret = do_one_initcall_debug(fn);
#else
	if (initcall_debug)
		ret = do_one_initcall_debug(fn);
	else
		ret = fn();
#endif
	msgbuf[0] = 0;

	if (preempt_count() != count) {
		sprintf(msgbuf, "preemption imbalance ");
		preempt_count_set(count);
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);

	return ret;
}
991
992
/* Section boundaries emitted by the linker script for each initcall level. */
extern initcall_t __initcall_start[];
extern initcall_t __initcall0_start[];
extern initcall_t __initcall1_start[];
extern initcall_t __initcall2_start[];
extern initcall_t __initcall3_start[];
extern initcall_t __initcall4_start[];
extern initcall_t __initcall5_start[];
extern initcall_t __initcall6_start[];
extern initcall_t __initcall7_start[];
extern initcall_t __initcall_end[];

/*
 * Per-level start markers in run order; the trailing __initcall_end is
 * a sentinel so level N spans [initcall_levels[N], initcall_levels[N+1]).
 */
static initcall_t *initcall_levels[] __initdata = {
	__initcall0_start,
	__initcall1_start,
	__initcall2_start,
	__initcall3_start,
	__initcall4_start,
	__initcall5_start,
	__initcall6_start,
	__initcall7_start,
	__initcall_end,
};
1015
/* Keep these in sync with initcalls in include/linux/init.h */
/* Indices match initcall_levels[]; used for log/parse_args labels. */
static char *initcall_level_names[] __initdata = {
	"early",
	"core",
	"postcore",
	"arch",
	"subsys",
	"fs",
	"device",
	"late",
};
1027
/* Run every initcall registered at @level, in link order. */
static void __init do_initcall_level(int level)
{
	initcall_t *fn;

	/*
	 * Re-parse the saved command line for this level's module
	 * parameters; parse_args() modifies the buffer, so refill the
	 * scratch copy each time.
	 */
	strcpy(initcall_command_line, saved_command_line);
	parse_args(initcall_level_names[level],
		   initcall_command_line, __start___param,
		   __stop___param - __start___param,
		   level, level,
		   NULL, &repair_env_string);

	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
		do_one_initcall(*fn);

#ifdef CONFIG_SEC_BOOTSTAT
	/* Samsung boot-statistics hook, invoked once per completed level. */
	sec_bootstat_add_initcall(initcall_level_names[level]);
#endif

}
1047
1048 static void __init do_initcalls(void)
1049 {
1050 int level;
1051
1052 for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
1053 do_initcall_level(level);
1054 }
1055
/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	cpuset_init_smp();
	shmem_init();
	driver_init();
	init_irq_proc();
	do_ctors();
	/* Enabled before do_initcalls() so initcalls may spawn helpers. */
	usermodehelper_enable();
	do_initcalls();
	random_int_secret_init();
}
1074
1075 static void __init do_pre_smp_initcalls(void)
1076 {
1077 initcall_t *fn;
1078
1079 for (fn = __initcall_start; fn < __initcall0_start; fn++)
1080 do_one_initcall(*fn);
1081 }
1082
1083 /*
1084 * This function requests modules which should be loaded by default and is
1085 * called twice right after initrd is mounted and right before init is
1086 * exec'd. If such modules are on either initrd or rootfs, they will be
1087 * loaded before control is passed to userland.
1088 */
1089 void __init load_default_modules(void)
1090 {
1091 load_default_elevator_module();
1092 }
1093
/*
 * Exec @init_filename with the argv/envp accumulated from the boot
 * command line.  Returns do_execve()'s error code on failure.
 */
static int run_init_process(const char *init_filename)
{
	/* argv[0] conventionally carries the program's own path. */
	argv_init[0] = init_filename;
	return do_execve(getname_kernel(init_filename),
		(const char __user *const __user *)argv_init,
		(const char __user *const __user *)envp_init);
}
1101
1102 static int try_to_run_init_process(const char *init_filename)
1103 {
1104 int ret;
1105
1106 ret = run_init_process(init_filename);
1107
1108 if (ret && ret != -ENOENT) {
1109 pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
1110 init_filename, ret);
1111 }
1112
1113 return ret;
1114 }
1115
#ifdef CONFIG_DEFERRED_INITCALLS
extern initcall_t __deferred_initcall_start[], __deferred_initcall_end[];

/*
 * Run the initcalls deferred out of the normal boot path, then free
 * init memory.  Scheduled (at most once) from kernel_init() after the
 * init process has been launched.
 *
 * Fix: the "Running ..." announcement is informational, not an error,
 * so log it with pr_info() instead of pr_err().
 */
static void __ref do_deferred_initcalls(struct work_struct *work)
{
	initcall_t *call;
	static bool already_run;

	if (already_run) {
		pr_warn("%s() has already run\n", __func__);
		return;
	}

	already_run = true;

	pr_info("Running %s()\n", __func__);

	for (call = __deferred_initcall_start;
	     call < __deferred_initcall_end; call++)
		do_one_initcall(*call);

	/* Safe now: no __init code runs after the deferred calls finish. */
	free_initmem();
}

static DECLARE_WORK(deferred_initcall_work, do_deferred_initcalls);
#endif
1143
1144 #ifdef CONFIG_SEC_GPIO_DVS
1145 extern void gpio_dvs_check_initgpio(void);
1146 #endif
1147
1148 static noinline void __init kernel_init_freeable(void);
1149
#ifdef CONFIG_DEBUG_RODATA
/* Write-protect kernel text/rodata; controllable via "rodata=" on the
 * kernel command line (default on). */
static bool rodata_enabled = true;
static int __init set_debug_rodata(char *str)
{
	if (strtobool(str, &rodata_enabled))
		pr_warn("Invalid option string for rodata: '%s'\n", str);
	/*
	 * __setup() handlers must return 1 when they consume the
	 * parameter. Returning strtobool()'s 0/-errno here made the
	 * kernel treat "rodata=" as an unknown parameter and pass it
	 * along to init's argv/envp, and silently ignored bad values.
	 */
	return 1;
}
__setup("rodata=", set_debug_rodata);

/* Apply (or explicitly skip) read-only protection of kernel memory. */
static void mark_readonly(void)
{
	if (rodata_enabled)
		mark_rodata_ro();
	else
		pr_info("Kernel memory protection disabled.\n");
}
#else
/* Arch does not implement CONFIG_DEBUG_RODATA; warn so it's visible. */
static inline void mark_readonly(void)
{
	pr_warn("This architecture does not have kernel memory protection.\n");
}
#endif
1171
/*
 * The kernel thread that becomes PID 1. Finishes the freeable part of
 * boot, frees .init memory (unless deferred initcalls still need it),
 * write-protects kernel text/rodata and execs userspace init. Returns
 * 0 once an init has been exec'd; panics if no candidate works.
 */
static int __ref kernel_init(void *unused)
{
	int ret;

	kernel_init_freeable();
#ifdef CONFIG_SEC_GPIO_DVS
	/************************ Caution !!! ****************************/
	/* This function must be located in appropriate INIT position
	 * in accordance with the specification of each BB vendor.
	 */
	/************************ Caution !!! ****************************/
	pr_info("%s: GPIO DVS: check init gpio\n", __func__);
	gpio_dvs_check_initgpio();
#endif
	/* need to finish all async __init code before freeing the memory */
	async_synchronize_full();
#ifndef CONFIG_DEFERRED_INITCALLS
	/* With deferred initcalls, free_initmem() runs after those instead. */
	free_initmem();
#endif
	mark_readonly();
	system_state = SYSTEM_RUNNING;
	numa_default_policy();

	/* Release any files whose final fput() was deferred during boot. */
	flush_delayed_fput();

	if (ramdisk_execute_command) {
		/* Early userspace (initramfs /init) found earlier. */
		ret = run_init_process(ramdisk_execute_command);
		if (!ret) {
#ifdef CONFIG_DEFERRED_INITCALLS
			/* init is up; run the remaining initcalls async. */
			schedule_work(&deferred_initcall_work);
#endif
			return 0;
		}
		pr_err("Failed to execute %s (error %d)\n",
			ramdisk_execute_command, ret);
	}

	/*
	 * We try each of these until one succeeds.
	 *
	 * The Bourne shell can be used instead of init if we are
	 * trying to recover a really broken machine.
	 */
	if (execute_command) {
		ret = run_init_process(execute_command);
		if (!ret) {
#ifdef CONFIG_DEFERRED_INITCALLS
			schedule_work(&deferred_initcall_work);
#endif
			return 0;
		}
		/* An explicit init= that fails is fatal, not a fallthrough. */
		panic("Requested init %s failed (error %d).",
			execute_command, ret);
	}
	if (!try_to_run_init_process("/sbin/init") ||
	    !try_to_run_init_process("/etc/init") ||
	    !try_to_run_init_process("/bin/init") ||
	    !try_to_run_init_process("/bin/sh"))
		return 0;

	panic("No working init found. Try passing init= option to kernel. "
		"See Linux Documentation/init.txt for guidance.");
}
1235
/*
 * Boot work that may reference .init memory: bring the other CPUs
 * online, run all registered initcalls, open the initial console, and
 * either locate an early userspace (/init in initramfs) or mount the
 * real root filesystem. Called from kernel_init() before .init memory
 * is freed; the ordering of calls below is load-bearing.
 */
static noinline void __init kernel_init_freeable(void)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);

	/* Now the scheduler is fully set up and can do blocking allocations */
	gfp_allowed_mask = __GFP_BITS_MASK;

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_MEMORY]);
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/* Ctrl-Alt-Del is routed to this task (PID 1). */
	cad_pid = task_pid(current);

	smp_prepare_cpus(setup_max_cpus);

	/* Early (pre-SMP) initcalls, then bring secondary CPUs online. */
	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
	sched_init_smp();

	page_alloc_init_late();

	/* Runs all remaining initcall levels and spawns usermode helpers. */
	do_basic_setup();

	/* Open the /dev/console on the rootfs, this should never fail */
	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
		pr_err("Warning: unable to open an initial console.\n");

	/* Duplicate fd 0 so fds 0,1,2 (stdin/out/err) all hit the console. */
	(void) sys_dup(0);
	(void) sys_dup(0);
	/*
	 * check if there is an early userspace init. If yes, let it do all
	 * the work
	 */

	if (!ramdisk_execute_command)
		ramdisk_execute_command = "/init";

	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
		/* No early userspace init; mount the real root instead. */
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 *
	 * rootfs is available now, try loading the public keys
	 * and default modules
	 */

	integrity_load_keys();
	load_default_modules();
}