ARM: ignore memory below PHYS_OFFSET
arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

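/*
 * Byte-order probe: the union stores "l??b" in a long, and casting the
 * long to char keeps its least-significant byte, which is the first
 * array element ('l') on a little-endian CPU and the last ('b') on a
 * big-endian one.  setup_processor() appends this character to the
 * utsname machine string and to elf_platform (e.g. "v7l").
 */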
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

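/*
 * Decode the CPU architecture from the main ID register.  Old cores use
 * ad-hoc ID layouts; anything with 0xF in bits [19:16] uses the revised
 * CPUID scheme, where the Memory Model Feature Register 0 distinguishes
 * the VMSAv7/PMSAv7 (ARMv7) memory models from the ARMv6 ones.
 */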
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

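/*
 * A virtually-indexed I-cache can alias when one way spans more than a
 * page.  On ARMv7 we select the instruction cache via CSSELR and read
 * its geometry from CCSIDR: if line size times number of sets exceeds
 * PAGE_SIZE, the index bits reach above the page offset and aliasing is
 * possible.  ARMv6 reports this directly in the cache type register.
 */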
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

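/*
 * Classify the L1 caches.  A cache type register with 0x4 in bits
 * [31:29] uses the ARMv7 layout, where bits [15:14] encode the L1
 * I-cache policy (01 = ASID-tagged VIVT, 11 = PIPT); otherwise fall
 * back to the ARMv6 interpretation of the register.
 */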
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

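/*
 * ID_ISAR0 bits [27:24] describe hardware integer divide: 1 means
 * SDIV/UDIV exist in the Thumb instruction set only, 2 means in the ARM
 * instruction set as well, so the switch below deliberately falls
 * through from the IDIVA case to also set IDIVT.
 */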
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

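/*
 * Map logical CPU 0 to the physical CPU we booted on: read affinity
 * level 0 of MPIDR (or use 0 on UP), give the boot CPU logical id 0,
 * and swap entries so that every hardware id still appears exactly once
 * in __cpu_logical_map.
 */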
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

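/*
 * Register a bank of RAM in the meminfo array.  The bank is clipped to
 * a page-aligned window and truncated to what a 32-bit phys_addr_t can
 * represent; per this patch, a bank lying entirely below PHYS_OFFSET is
 * ignored and a bank straddling it is trimmed, since the kernel's
 * linear mapping of RAM starts at PHYS_OFFSET.
 */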
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
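/*
 * Example (illustrative values): "mem=256M@0x80000000" registers one
 * 256 MiB bank starting at physical address 0x80000000; without the
 * "@start" part the bank is assumed to start at PHYS_OFFSET.  Each
 * additional "mem=" adds another bank, and the first one discards the
 * banks supplied by the bootloader.
 */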
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter.  The reserved memory is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

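/*
 * sort() comparator used by setup_arch() to order memory banks by
 * ascending start pfn.  It returns only the sign of the long
 * difference, which is all sort() needs and avoids truncating a wide
 * pfn delta into the int return value.
 */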
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

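/*
 * Main architecture-specific boot entry point, called from
 * start_kernel(): identify the CPU, find the machine descriptor (FDT
 * first, ATAGS as fallback), parse early parameters, sort and sanity
 * check the memory banks, initialise memblock and paging, then hand the
 * unflattened device tree and the SMP operations to the rest of the
 * kernel.
 */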
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

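/*
 * Feature names reported in /proc/cpuinfo.  The array index matches the
 * HWCAP_* bit position in elf_hwcap, so c_show() can print every set
 * capability simply by walking the bits.
 */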
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

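/*
 * Minimal seq_file iterator: yield a single dummy token on the first
 * call and stop on the next, so c_show() emits the whole of
 * /proc/cpuinfo in one pass.
 */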
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};