arch/arm/kernel/setup.c — annotated (git blame) source listing
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
883a106b 21#include <linux/of_platform.h>
1da177e4 22#include <linux/init.h>
3c57fb43 23#include <linux/kexec.h>
93c02ab4 24#include <linux/of_fdt.h>
1da177e4
LT
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
7bbb7940 27#include <linux/smp.h>
e119bfff 28#include <linux/proc_fs.h>
2778f620 29#include <linux/memblock.h>
2ecccf90
DM
30#include <linux/bug.h>
31#include <linux/compiler.h>
27a3f0e9 32#include <linux/sort.h>
1da177e4 33
b86040a5 34#include <asm/unified.h>
15d07dc9 35#include <asm/cp15.h>
1da177e4 36#include <asm/cpu.h>
0ba8b9b2 37#include <asm/cputype.h>
1da177e4 38#include <asm/elf.h>
1da177e4 39#include <asm/procinfo.h>
37efe642 40#include <asm/sections.h>
1da177e4 41#include <asm/setup.h>
f00ec48f 42#include <asm/smp_plat.h>
1da177e4
LT
43#include <asm/mach-types.h>
44#include <asm/cacheflush.h>
46097c7d 45#include <asm/cachetype.h>
1da177e4
LT
46#include <asm/tlbflush.h>
47
93c02ab4 48#include <asm/prom.h>
1da177e4
LT
49#include <asm/mach/arch.h>
50#include <asm/mach/irq.h>
51#include <asm/mach/time.h>
9f97da78
DH
52#include <asm/system_info.h>
53#include <asm/system_misc.h>
5cbad0eb 54#include <asm/traps.h>
bff595c1 55#include <asm/unwind.h>
1c16d242 56#include <asm/memblock.h>
4588c34d 57#include <asm/virt.h>
1da177e4 58
4cd9d6f7 59#include "atags.h"
0fc1c832 60
1da177e4
LT
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Floating point emulator type selected with the "fpe=" boot argument. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel command line option.
 * Copies exactly 8 bytes into fpe_type; a longer argument is truncated
 * and fpe_type may be left without a NUL terminator.
 * NOTE(review): assumes consumers of fpe_type handle the unterminated
 * case — confirm against the FPE probe code.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
/* Boot-time helpers implemented elsewhere in arch/arm. */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* Main ID register value, cached for assembly and /proc consumers. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache type bitmask (CACHEID_*), filled in by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGS/DTB handed over by the boot loader. */
unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* feature bits advertised to userspace via AT_HWCAP. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type method tables, copied from the matched proc_info_list
 * entry in setup_processor() when the kernel supports multiple cores.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/* Small per-mode exception stacks (IRQ/abort/undef), one set per CPU. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Writable copy of boot_command_line handed back via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Runtime endianness probe: first byte of the long tells which end wins. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 * start/end are placeholders here; request_standard_resources() fills
 * them in from the linker symbols and machine description at boot.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases into mem_res[]. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
173
/*
 * Legacy PC-style parallel port I/O ranges; claimed only when the
 * machine description sets the corresponding reserve_lp* flag.
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[]. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
198
1da177e4
LT
/*
 * Architecture name strings for /proc/cpuinfo, indexed by the
 * CPU_ARCH_* value returned by cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
218
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Called once from setup_processor(); everyone else should
 * use the cached cpu_architecture() instead.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes ARMv4T from ARMv3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Old-style ID: architecture encoded in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
251
/*
 * Return the cached CPU architecture, established by setup_processor().
 * BUGs if called before the cache has been initialised.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
258
8925ec4c
WD
/*
 * Decide whether the L1 instruction cache can alias, i.e. whether one
 * cache way (line size * number of sets) is larger than a page.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache, then read its size/geometry. */
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		/* CCSIDR: LineSize in [2:0] (log2(words)-2), NumSets in [27:13] */
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
291
c0e95878
RK
/*
 * Work out the global cache type (VIVT, VIPT aliasing/non-aliasing,
 * PIPT, ASID-tagged) from the cache type register and record it in
 * the "cacheid" bitmask used by the cache maintenance code.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		/* CTR format field [31:29] == 0b100 means ARMv7 layout */
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field [15:14]: 01 = ASID-tagged VIVT, 11 = PIPT */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			/* ARMv6 CTR bit 23 flags an aliasing D-cache */
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-ARMv6 cores have VIVT caches */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
333
1da177e4
LT
334/*
335 * These functions re-use the assembly code in head.S, which
336 * already provide the required functionality.
337 */
0f44ba1d 338extern struct proc_info_list *lookup_processor_type(unsigned int);
6fc31d54 339
/*
 * printk-style output usable before the console is up.  The formatted
 * text (truncated at 256 bytes) also goes to the low-level debug UART
 * when CONFIG_DEBUG_LL is enabled.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
355
8164f7af
SB
/*
 * Add hwcaps that must be probed from the ARMv7+ CPUID feature
 * registers rather than taken from the static proc_info entry.
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	/*
	 * ID_ISAR0[27:24] "Divide_instrs": 1 = SDIV/UDIV in Thumb only,
	 * 2 = SDIV/UDIV in both ARM and Thumb state.
	 */
	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through - ARM-state divide implies Thumb divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
372
f159f4ed
TL
373static void __init feat_v6_fixup(void)
374{
375 int id = read_cpuid_id();
376
377 if ((id & 0xff0f0000) != 0x41070000)
378 return;
379
380 /*
381 * HWCAP_TLS is available only on 1136 r1p0 and later,
382 * see also kuser_get_tls_init.
383 */
384 if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
385 elf_hwcap &= ~HWCAP_TLS;
386}
387
ccea7a19
RK
388/*
389 * cpu_init - initialise one CPU.
390 *
90f1e084 391 * cpu_init sets up the per-CPU stacks.
ccea7a19 392 */
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: it briefly switches into IRQ,
 * abort and undefined-instruction modes to load each banked SP, then
 * returns to SVC mode.  Must not be traced (mode switching).
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr cpsr_c, %1\n\t"
	"add r14, %0, %2\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %3\n\t"
	"add r14, %0, %4\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %5\n\t"
	"add r14, %0, %6\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
446
/* Logical-to-physical CPU map; entries start out invalid. */
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/*
 * Arrange the logical CPU map so that logical CPU 0 is the physical
 * CPU we are booting on (affinity level 0 of MPIDR), swapping it with
 * whatever logical slot that physical ID would naturally occupy.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
461
b69874e4
RK
/*
 * Identify the CPU, install its per-CPU method tables and derive the
 * architecture/hwcap/cache state.  Must run before any of the cached
 * accessors (cpu_architecture(), cacheid, elf_hwcap) are used.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the per-CPU-type method tables out of the matched entry. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Advertise CPU and platform (with endianness suffix) to userspace. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	/* No Thumb support compiled in: hide Thumb-only capabilities. */
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
515
/*
 * Print every machine entry compiled into this kernel, then spin
 * forever.  Called when the boot loader hands us an unknown machine
 * ID, before the MMU/console are fully set up.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
529
/*
 * Record a block of RAM in the meminfo bank table.
 * start is rounded up and size rounded down to page boundaries; on
 * non-LPAE kernels the bank is truncated to fit 32-bit addressing.
 * Returns 0 on success, -EINVAL if the table is full or the bank
 * would be empty after alignment.
 */
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
572
1da177e4
LT
573/*
574 * Pick out the memory size. We look for mem=size@start,
575 * where start and size are "size[KkMm]"
576 */
2b0d8c25 577static int __init early_mem(char *p)
1da177e4
LT
578{
579 static int usermem __initdata = 0;
a5d5f7da 580 phys_addr_t size;
f60892d3 581 phys_addr_t start;
2b0d8c25 582 char *endp;
1da177e4
LT
583
584 /*
585 * If the user specifies memory size, we
586 * blow away any automatically generated
587 * size.
588 */
589 if (usermem == 0) {
590 usermem = 1;
591 meminfo.nr_banks = 0;
592 }
593
594 start = PHYS_OFFSET;
2b0d8c25
JK
595 size = memparse(p, &endp);
596 if (*endp == '@')
597 start = memparse(endp + 1, NULL);
1da177e4 598
1c97b73e 599 arm_add_memory(start, size);
1da177e4 600
2b0d8c25 601 return 0;
1da177e4 602}
2b0d8c25 603early_param("mem", early_mem);
1da177e4 604
/*
 * Register "System RAM" (one resource per memblock region), the kernel
 * code/data spans, optional video RAM and the legacy parallel port
 * ranges with the iomem/ioport resource trees.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel image, from the linker symbols. */
	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		/* alloc_bootmem_low() panics on failure, so no NULL check. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel spans under the RAM region containing them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
649
1da177e4
LT
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 text-mode description for the VT console layer. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
1da177e4 660
1da177e4
LT
661static int __init customize_machine(void)
662{
883a106b
AB
663 /*
664 * customizes platform devices, or adds new ones
665 * On DT based machines, we fall back to populating the
666 * machine from the device tree, if no callback is provided,
667 * otherwise we would always need an init_machine callback.
668 */
8ff1443c
RK
669 if (machine_desc->init_machine)
670 machine_desc->init_machine();
883a106b
AB
671#ifdef CONFIG_OF
672 else
673 of_platform_populate(NULL, of_default_bus_match_table,
674 NULL, NULL);
675#endif
1da177e4
LT
676 return 0;
677}
678arch_initcall(customize_machine);
679
90de4137
SG
680static int __init init_machine_late(void)
681{
682 if (machine_desc->init_late)
683 machine_desc->init_late();
684 return 0;
685}
686late_initcall(init_machine_late);
687
3c57fb43
MW
688#ifdef CONFIG_KEXEC
689static inline unsigned long long get_total_mem(void)
690{
691 unsigned long total;
692
693 total = max_low_pfn - min_low_pfn;
694 return total << PAGE_SHIFT;
695}
696
697/**
698 * reserve_crashkernel() - reserves memory are for crash kernel
699 *
700 * This function reserves memory area given in "crashkernel=" kernel command
701 * line parameter. The memory reserved is used by a dump capture kernel when
702 * primary kernel is crashing.
703 */
704static void __init reserve_crashkernel(void)
705{
706 unsigned long long crash_size, crash_base;
707 unsigned long long total_mem;
708 int ret;
709
710 total_mem = get_total_mem();
711 ret = parse_crashkernel(boot_command_line, total_mem,
712 &crash_size, &crash_base);
713 if (ret)
714 return;
715
716 ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
717 if (ret < 0) {
718 printk(KERN_WARNING "crashkernel reservation failed - "
719 "memory is in use (0x%lx)\n", (unsigned long)crash_base);
720 return;
721 }
722
723 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
724 "for crashkernel (System RAM: %ldMB)\n",
725 (unsigned long)(crash_size >> 20),
726 (unsigned long)(crash_base >> 20),
727 (unsigned long)(total_mem >> 20));
728
729 crashk_res.start = crash_base;
730 crashk_res.end = crash_base + crash_size - 1;
731 insert_resource(&iomem_resource, &crashk_res);
732}
733#else
734static inline void reserve_crashkernel(void) {}
735#endif /* CONFIG_KEXEC */
736
27a3f0e9
NP
737static int __init meminfo_cmp(const void *_a, const void *_b)
738{
739 const struct membank *a = _a, *b = _b;
740 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
741 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
742}
6291319d 743
4588c34d
DM
744void __init hyp_mode_check(void)
745{
746#ifdef CONFIG_ARM_VIRT_EXT
747 if (is_hyp_mode_available()) {
748 pr_info("CPU: All CPU(s) started in HYP mode.\n");
749 pr_info("CPU: Virtualization extensions available.\n");
750 } else if (is_hyp_mode_mismatched()) {
751 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
752 __boot_cpu_mode & MODE_MASK);
753 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
754 } else
755 pr_info("CPU: All CPU(s) started in SVC mode.\n");
756#endif
757}
758
6291319d
GL
/*
 * setup_arch - ARM architecture-specific boot-time initialisation.
 * Identifies the CPU and machine, parses early parameters, sets up
 * memory (meminfo/memblock/paging), resources, SMP and console state.
 * The ordering of calls below is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAGS. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before the memory subsystem consumes them. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP, secondary bring-up performs this check instead. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
826
827
828static int __init topology_init(void)
829{
830 int cpu;
831
66fb8bd2
RK
832 for_each_possible_cpu(cpu) {
833 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
834 cpuinfo->cpu.hotpluggable = 1;
835 register_cpu(&cpuinfo->cpu, cpu);
836 }
1da177e4
LT
837
838 return 0;
839}
1da177e4
LT
840subsys_initcall(topology_init);
841
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory; entries are added by other code. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
854
1da177e4
LT
/*
 * Feature names for the /proc/cpuinfo "Features" line.  Array index
 * must correspond to the HWCAP_* bit position tested in elf_hwcap.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
877
1da177e4
LT
/*
 * Emit the whole /proc/cpuinfo content: a block per online CPU
 * followed by the machine-wide Hardware/Revision/Serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		/* Per-CPU ID on SMP; UP systems just read the boot CPU. */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
940
941static void *c_start(struct seq_file *m, loff_t *pos)
942{
943 return *pos < 1 ? (void *)1 : NULL;
944}
945
946static void *c_next(struct seq_file *m, void *v, loff_t *pos)
947{
948 ++*pos;
949 return NULL;
950}
951
/* Nothing to release: c_start() allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
955
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};