Merge tag 'v3.10.55' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
883a106b 21#include <linux/of_platform.h>
1da177e4 22#include <linux/init.h>
3c57fb43 23#include <linux/kexec.h>
93c02ab4 24#include <linux/of_fdt.h>
1da177e4
LT
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
7bbb7940 27#include <linux/smp.h>
e119bfff 28#include <linux/proc_fs.h>
2778f620 29#include <linux/memblock.h>
2ecccf90
DM
30#include <linux/bug.h>
31#include <linux/compiler.h>
27a3f0e9 32#include <linux/sort.h>
1da177e4 33
b86040a5 34#include <asm/unified.h>
15d07dc9 35#include <asm/cp15.h>
1da177e4 36#include <asm/cpu.h>
0ba8b9b2 37#include <asm/cputype.h>
1da177e4 38#include <asm/elf.h>
1da177e4 39#include <asm/procinfo.h>
6fa3eb70 40#include <asm/psci.h>
37efe642 41#include <asm/sections.h>
1da177e4 42#include <asm/setup.h>
f00ec48f 43#include <asm/smp_plat.h>
1da177e4
LT
44#include <asm/mach-types.h>
45#include <asm/cacheflush.h>
46097c7d 46#include <asm/cachetype.h>
1da177e4
LT
47#include <asm/tlbflush.h>
48
93c02ab4 49#include <asm/prom.h>
1da177e4
LT
50#include <asm/mach/arch.h>
51#include <asm/mach/irq.h>
52#include <asm/mach/time.h>
9f97da78
DH
53#include <asm/system_info.h>
54#include <asm/system_misc.h>
5cbad0eb 55#include <asm/traps.h>
bff595c1 56#include <asm/unwind.h>
1c16d242 57#include <asm/memblock.h>
4588c34d 58#include <asm/virt.h>
1da177e4 59
4cd9d6f7 60#include "atags.h"
0fc1c832 61
1da177e4
LT
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Name of the FP emulator selected with "fpe=" on the kernel command line */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * NOTE(review): always copies 8 bytes, so a "fpe=" argument shorter
	 * than 8 characters makes this read past the end of the option
	 * string - confirm the early-param buffer guarantees 8 readable
	 * bytes here.
	 */
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
4b5f32ce 75extern void paging_init(struct machine_desc *desc);
0371d3f7 76extern void sanity_check_meminfo(void);
1da177e4 77extern void reboot_setup(char *str);
c7909509 78extern void setup_dma_zone(struct machine_desc *desc);
1da177e4
LT
79
80unsigned int processor_id;
c18f6581 81EXPORT_SYMBOL(processor_id);
0385ebc0 82unsigned int __machine_arch_type __read_mostly;
1da177e4 83EXPORT_SYMBOL(__machine_arch_type);
0385ebc0 84unsigned int cacheid __read_mostly;
c0e95878 85EXPORT_SYMBOL(cacheid);
1da177e4 86
9d20fdd5
BG
87unsigned int __atags_pointer __initdata;
88
1da177e4
LT
89unsigned int system_rev;
90EXPORT_SYMBOL(system_rev);
91
92unsigned int system_serial_low;
93EXPORT_SYMBOL(system_serial_low);
94
95unsigned int system_serial_high;
96EXPORT_SYMBOL(system_serial_high);
97
0385ebc0 98unsigned int elf_hwcap __read_mostly;
1da177e4
LT
99EXPORT_SYMBOL(elf_hwcap);
100
101
102#ifdef MULTI_CPU
0385ebc0 103struct processor processor __read_mostly;
1da177e4
LT
104#endif
105#ifdef MULTI_TLB
0385ebc0 106struct cpu_tlb_fns cpu_tlb __read_mostly;
1da177e4
LT
107#endif
108#ifdef MULTI_USER
0385ebc0 109struct cpu_user_fns cpu_user __read_mostly;
1da177e4
LT
110#endif
111#ifdef MULTI_CACHE
0385ebc0 112struct cpu_cache_fns cpu_cache __read_mostly;
1da177e4 113#endif
953233dc 114#ifdef CONFIG_OUTER_CACHE
0385ebc0 115struct outer_cache_fns outer_cache __read_mostly;
6c09f09d 116EXPORT_SYMBOL(outer_cache);
953233dc 117#endif
1da177e4 118
2ecccf90
DM
119/*
120 * Cached cpu_architecture() result for use by assembler code.
121 * C code should use the cpu_architecture() function instead of accessing this
122 * variable directly.
123 */
124int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
125
/*
 * Small per-CPU stacks for the re-entrant exception modes.  cpu_init()
 * points the IRQ, abort and undefined-instruction mode stack pointers
 * at these (three words each, used only briefly on exception entry).
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
133
1da177e4
LT
134char elf_platform[ELF_PLATFORM_SIZE];
135EXPORT_SYMBOL(elf_platform);
136
1da177e4
LT
137static const char *cpu_name;
138static const char *machine_name;
48ab7e09 139static char __initdata cmd_line[COMMAND_LINE_SIZE];
8ff1443c 140struct machine_desc *machine_desc __initdata;
1da177e4 141
1da177e4
LT
142static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
143#define ENDIANNESS ((char)endian_test.l)
144
145DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
146
147/*
148 * Standard memory resources
149 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/*
 * Convenience names for the entries above.  start/end are filled in
 * later: kernel code/data in request_standard_resources(), video RAM
 * from the machine descriptor.
 */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy PC-style parallel port I/O ranges, claimed only when the
 * machine descriptor sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
199
1da177e4
LT
200static const char *proc_arch[] = {
201 "undefined/unknown",
202 "3",
203 "4",
204 "4T",
205 "5",
206 "5T",
207 "5TE",
208 "5TEJ",
209 "6TEJ",
6b090a25 210 "7",
1da177e4
LT
211 "?(11)",
212 "?(12)",
213 "?(13)",
214 "?(14)",
215 "?(15)",
216 "?(16)",
217 "?(17)",
218};
219
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the CPUID main
 * ID register.  Called once from setup_processor(); the result is cached
 * in __cpu_architecture and read via cpu_architecture().
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes ARMv4T from ARMv3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Old CPUID scheme: architecture encoded in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
252
/*
 * Return the cached CPU architecture version.  BUGs if called before
 * setup_processor() has populated __cpu_architecture.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
259
/*
 * Determine whether the instruction cache can alias: true when the
 * I-cache is virtually indexed and a cache way spans more than one page.
 * @arch selects how the cache geometry registers are interpreted.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

#ifdef CONFIG_BIG_LITTLE
	/*
	 * We expect a combination of Cortex-A15 and Cortex-A7 cores.
	 * A7 = VIPT aliasing I-cache
	 * A15 = PIPT (non-aliasing) I-cache
	 * To cater for this discrepancy, let's assume aliasing I-cache
	 * all the time. This means unneeded extra work on the A15 but
	 * only ptrace is affected which is not performance critical.
	 */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
		return 1;
#endif

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache in CSSELR, then read its geometry
		 * from CCSIDR (ISB required between the two accesses). */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible if one way exceeds the page size */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
305
/*
 * Classify the D-cache and I-cache (VIVT / VIPT aliasing / VIPT
 * non-aliasing / PIPT) from the Cache Type Register and record the
 * result in the global 'cacheid', then report it on the console.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] encode the L1 I-cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 cores are all VIVT */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
347
1da177e4
LT
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printk-style output usable very early in boot; the formatted message
 * also goes to the low-level debug UART when CONFIG_DEBUG_LL is set.
 * Output longer than the 256-byte buffer is truncated by vsnprintf.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
369
/*
 * Probe the ID_ISAR0 divide-instructions field (bits [27:24]) and set
 * the corresponding hardware-divide hwcap bits.  Only meaningful on
 * ARMv7 and later; older architectures are left untouched.
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through - ARM-mode divide implies Thumb-mode divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
386
f159f4ed
TL
387static void __init feat_v6_fixup(void)
388{
389 int id = read_cpuid_id();
390
391 if ((id & 0xff0f0000) != 0x41070000)
392 return;
393
394 /*
395 * HWCAP_TLS is available only on 1136 r1p0 and later,
396 * see also kuser_get_tls_init.
397 */
398 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
399 elf_hwcap &= ~HWCAP_TLS;
400}
401
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	erratum_a15_798181_init();

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into each exception mode in turn (via msr cpsr_c), point
	 * that mode's banked SP at the right slot of this CPU's 'stacks'
	 * entry, and finish back in SVC mode.  Interrupts stay masked
	 * (PSR_F_BIT | PSR_I_BIT) throughout.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
462
/* Logical-to-physical CPU map; entries start out invalid */
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/*
 * Make the booting CPU logical CPU 0: read its affinity-level-0 field
 * from MPIDR and swap it into slot 0 of the logical map, shifting the
 * displaced entry to where the boot CPU's number used to be.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
482
/*
 * Identify the boot CPU, install its per-CPU-type function tables
 * (MULTI_* kernels), populate elf_hwcap/utsname/elf_platform, and run
 * the early per-CPU initialisation (cacheid_init, cpu_init).
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		/* Unknown CPU: nothing sensible to do but spin */
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	/* Without Thumb support, neither Thumb nor Thumb-divide is usable */
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
536
/*
 * Print every machine type this kernel was built to support, then spin
 * forever.  Used when the boot machine ID / device tree cannot be
 * matched against any compiled-in machine descriptor.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
550
/*
 * arm_add_memory - register a bank of physical memory with meminfo
 * @start: physical start address (rounded up to a page boundary)
 * @size:  size in bytes (trimmed to whole pages)
 *
 * Returns 0 on success, -EINVAL when the bank table is full, the region
 * lies entirely outside the usable physical address space, or the bank
 * ends up empty after alignment and clipping.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	/* Without LPAE, clip anything not addressable in 32 bits */
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	/* Memory below PHYS_OFFSET is unusable; drop or clip the bank */
	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
615
/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* Start defaults to the base of RAM when "@start" is omitted */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
1da177e4 647
/*
 * Register the standard /proc/iomem and /proc/ioport resources:
 * one "System RAM" entry per memblock region (with the kernel code and
 * data nested inside the region that contains them), plus the optional
 * video RAM and legacy parallel-port ranges from the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
692
1da177e4
LT
693#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
694struct screen_info screen_info = {
695 .orig_video_lines = 30,
696 .orig_video_cols = 80,
697 .orig_video_mode = 0,
698 .orig_video_ega_bx = 0,
699 .orig_video_isVGA = 1,
700 .orig_video_points = 8
701};
4394c124 702#endif
1da177e4 703
/*
 * arch_initcall: run the machine descriptor's init_machine() hook, or,
 * on DT machines without one, populate platform devices from the tree.
 */
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
722
/* late_initcall: run the machine descriptor's init_late() hook, if any. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
730
#ifdef CONFIG_KEXEC
/* Total usable low memory in bytes, used to size the crashkernel request */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
779
27a3f0e9
NP
780static int __init meminfo_cmp(const void *_a, const void *_b)
781{
782 const struct membank *a = _a, *b = _b;
783 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
784 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
785}
6291319d 786
/*
 * Report which exception mode the CPUs were booted in.  A mode mismatch
 * between cores (some HYP, some not) indicates broken firmware and is
 * warned about; with CONFIG_ARM_VIRT_EXT disabled this is a no-op.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
801
/*
 * Main ARM architecture setup, called from start_kernel() with the MMU
 * on.  Identifies the CPU and machine (DT first, then ATAGs), parses
 * early parameters, initialises the memory layout and paging, and wires
 * up the machine descriptor's hooks.  Ordering here is significant:
 * e.g. paging_init() must follow sanity_check_meminfo()/memblock init.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a device tree; fall back to ATAGS / machine number */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before the meminfo sanity checks run */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		/* PSCI-based SMP ops take precedence over the machine's own */
		if (psci_smp_available())
			smp_set_ops(&psci_smp_ops);
		else if (mdesc->smp)
			smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP the check runs after all secondaries boot; do it here for UP */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
873
874
/* Register every possible CPU with the CPU subsystem (sysfs topology). */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		/* All CPUs are reported as hotpluggable here */
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
888
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the empty /proc/cpu directory; entries are added elsewhere. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
901
1da177e4
LT
/*
 * Names for the elf_hwcap bits, indexed by bit position; printed by
 * c_show_features() as the "Features" line of /proc/cpuinfo.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
924
/*
 * Print the per-CPU feature block of /proc/cpuinfo for the CPU whose
 * main ID register value is @cpuid: the hwcap feature list followed by
 * the implementer / architecture / variant / part / revision fields.
 */
static void c_show_features(struct seq_file *m, u32 cpuid)
{
	int j;

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (j = 0; hwcap_str[j]; j++)
		if (elf_hwcap & (1 << j))
			seq_printf(m, "%s ", hwcap_str[j]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
	seq_printf(m, "CPU architecture: %s\n",
		   proc_arch[cpu_architecture()]);

	if ((cpuid & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
	} else {
		if ((cpuid & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (cpuid >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (cpuid >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (cpuid >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
}
958
/*
 * seq_file show() for /proc/cpuinfo.  In COMPAT_CPUINFO mode the
 * "Processor" header is printed once up front and the feature block
 * once at the end; otherwise both are emitted per online CPU.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;
	u32 cpuid;
	int compat = config_enabled(CONFIG_COMPAT_CPUINFO);

	if (compat)
		seq_printf(m, "Processor\t: %s rev %d (%s)\n",
			   cpu_name, read_cpuid_id() & 15, elf_platform);

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		if (!compat)
			seq_printf(m, "Processor\t: %s rev %d (%s)\n",
				   cpu_name, cpuid & 15, elf_platform);
		seq_printf(m, "processor\t: %d\n", i);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		if (!compat)
			c_show_features(m, cpuid);
		else
			seq_printf(m, "\n");
	}

	/*
	 * NOTE(review): in compat mode this reuses 'cpuid' from the last
	 * loop iteration; it would be uninitialised if no CPU were online
	 * (cannot occur in practice - the CPU reading this file is online).
	 */
	if (compat)
		c_show_features(m, cpuid);
	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1005
1006static void *c_start(struct seq_file *m, loff_t *pos)
1007{
1008 return *pos < 1 ? (void *)1 : NULL;
1009}
1010
1011static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1012{
1013 ++*pos;
1014 return NULL;
1015}
1016
1017static void c_stop(struct seq_file *m, void *v)
1018{
1019}
1020
2ffd6e18 1021const struct seq_operations cpuinfo_op = {
1da177e4
LT
1022 .start = c_start,
1023 .next = c_next,
1024 .stop = c_stop,
1025 .show = c_show
1026};