/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

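/*
 * "fpe=" selects the software floating-point emulator; the argument is
 * stashed in fpe_type[] and, as far as I can tell, later examined by the
 * NWFPE/FastFPE initialisation code.
 */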
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

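/*
 * Small per-CPU scratch stacks (three words per mode) for the IRQ, abort
 * and undefined-instruction exception modes; cpu_init() below points each
 * mode's banked SP at these so the vector entry code can stash a few
 * registers before switching to SVC mode.
 */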
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

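/*
 * Run-time endianness probe: ENDIANNESS reads the least-significant byte
 * of the long, which is 'l' on a little-endian CPU and 'b' on a big-endian
 * one; it is appended to the utsname machine and ELF platform strings in
 * setup_processor() below.
 */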
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

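/*
 * Derive the architecture version from the MIDR: a pre-ARM7 ID means
 * "unknown"; ARM7 parts use bit 23 (Thumb capable) to tell v4T from v3;
 * older post-ARM7 parts encode the architecture directly in bits [18:16];
 * and 0xF in bits [19:16] selects the revised CPUID scheme, where MMFR0
 * distinguishes the VMSA/PMSA versions of ARMv6 and ARMv7.
 */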
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

#ifdef CONFIG_BIG_LITTLE
	/*
	 * We expect a combination of Cortex-A15 and Cortex-A7 cores.
	 * A7 = VIPT aliasing I-cache
	 * A15 = PIPT (non-aliasing) I-cache
	 * To cater for this discrepancy, let's assume aliasing I-cache
	 * all the time.  This means unneeded extra work on the A15 but
	 * only ptrace is affected which is not performance critical.
	 */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
		return 1;
#endif

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

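/*
 * Decode the cache type register: 0b100 in bits [31:29] marks the ARMv7
 * format, whose L1Ip field (bits [15:14]) gives the L1 I-cache policy
 * (01 = ASID-tagged VIVT, 11 = PIPT); in the older ARMv6 format, bit 23
 * flags an aliasing (page-coloured) VIPT D-cache.
 */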
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
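		/* fall through: ARM-mode SDIV/UDIV (2) implies the Thumb forms (1) */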
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	erratum_a15_798181_init();

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

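	/*
	 * The booting CPU becomes logical CPU 0; the loop swaps entries so
	 * that every hardware id still appears exactly once in the map.
	 */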
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Reject the region if page-size rounding has left it with
	 * zero size.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
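/*
 * Example (illustrative values): "mem=512M@0x20000000" registers one 512MB
 * bank at physical address 0x20000000; a plain "mem=256M" starts the bank
 * at PHYS_OFFSET.
 */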
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

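/*
 * Register the RAM banks and the kernel text/data regions with the resource
 * tree so they show up in /proc/iomem, and claim the legacy parallel-port
 * I/O ranges when the machine descriptor asks for them.
 */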
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have the parallel-port
	 * resources lp0, lp1 or lp2.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customize platform devices, or add new ones.  On DT-based
	 * machines, we fall back to populating the machine from the
	 * device tree if no callback is provided; otherwise we would
	 * always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command-line parameter.  The reserved memory is used by a dump-capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

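/*
 * Architecture-specific boot-time setup, called once from start_kernel()
 * on the boot CPU: identify the CPU and machine (via FDT or ATAGS), set up
 * the memory layout and paging, then hand the machine descriptor's hooks
 * to the generic code.
 */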
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (psci_smp_available())
			smp_set_ops(&psci_smp_ops);
		else if (mdesc->smp)
			smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

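/*
 * Register each possible CPU with sysfs (/sys/devices/system/cpu/cpuN),
 * marking them all as hotpluggable.
 */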
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

static void c_show_features(struct seq_file *m, u32 cpuid)
{
	int j;

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (j = 0; hwcap_str[j]; j++)
		if (elf_hwcap & (1 << j))
			seq_printf(m, "%s ", hwcap_str[j]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
	seq_printf(m, "CPU architecture: %s\n",
		   proc_arch[cpu_architecture()]);

	if ((cpuid & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
	} else {
		if ((cpuid & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (cpuid >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (cpuid >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (cpuid >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
}

static int c_show(struct seq_file *m, void *v)
{
	int i;
	u32 cpuid;
	int compat = config_enabled(CONFIG_COMPAT_CPUINFO);

	if (compat)
		seq_printf(m, "Processor\t: %s rev %d (%s)\n",
			   cpu_name, read_cpuid_id() & 15, elf_platform);

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		if (!compat)
			seq_printf(m, "Processor\t: %s rev %d (%s)\n",
				   cpu_name, cpuid & 15, elf_platform);
		seq_printf(m, "processor\t: %d\n", i);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		if (!compat)
			c_show_features(m, cpuid);
		else
			seq_printf(m, "\n");
	}

	if (compat)
		c_show_features(m, cpuid);
	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

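/*
 * /proc/cpuinfo is a single "record": c_start hands out one token for
 * *pos == 0 so that c_show runs exactly once, and c_next always ends
 * the sequence.
 */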
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};