Merge tag 'f2fs-for-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
ecea4ab6 10#include <linux/export.h>
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
1da177e4 21#include <linux/init.h>
3c57fb43 22#include <linux/kexec.h>
93c02ab4 23#include <linux/of_fdt.h>
1da177e4
LT
24#include <linux/cpu.h>
25#include <linux/interrupt.h>
7bbb7940 26#include <linux/smp.h>
e119bfff 27#include <linux/proc_fs.h>
2778f620 28#include <linux/memblock.h>
2ecccf90
DM
29#include <linux/bug.h>
30#include <linux/compiler.h>
27a3f0e9 31#include <linux/sort.h>
1da177e4 32
b86040a5 33#include <asm/unified.h>
15d07dc9 34#include <asm/cp15.h>
1da177e4 35#include <asm/cpu.h>
0ba8b9b2 36#include <asm/cputype.h>
1da177e4 37#include <asm/elf.h>
1da177e4 38#include <asm/procinfo.h>
37efe642 39#include <asm/sections.h>
1da177e4 40#include <asm/setup.h>
f00ec48f 41#include <asm/smp_plat.h>
1da177e4
LT
42#include <asm/mach-types.h>
43#include <asm/cacheflush.h>
46097c7d 44#include <asm/cachetype.h>
1da177e4
LT
45#include <asm/tlbflush.h>
46
93c02ab4 47#include <asm/prom.h>
1da177e4
LT
48#include <asm/mach/arch.h>
49#include <asm/mach/irq.h>
50#include <asm/mach/time.h>
9f97da78
DH
51#include <asm/system_info.h>
52#include <asm/system_misc.h>
5cbad0eb 53#include <asm/traps.h>
bff595c1 54#include <asm/unwind.h>
1c16d242 55#include <asm/memblock.h>
4588c34d 56#include <asm/virt.h>
1da177e4 57
4cd9d6f7 58#include "atags.h"
0fc1c832 59
1da177e4
LT
60
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Raw value of the "fpe=" boot argument, captured for the FP emulator. */
char fpe_type[8];

/*
 * Parse the "fpe=" boot argument.
 * NOTE(review): copies a fixed 8 bytes regardless of the argument's
 * length, so it may read past the end of a shorter string — confirm
 * callers always provide a sufficiently long buffer.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72
/* Implemented elsewhere in arch/arm; invoked from setup_arch() below. */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;	/* CACHEID_* flags, set by cacheid_init() */
EXPORT_SYMBOL(cacheid);

/* Boot-time pointer handed to setup_machine_fdt()/setup_machine_tags(). */
unsigned int __atags_pointer __initdata;

/* Board identification, reported via /proc/cpuinfo in c_show(). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* feature bits, filled in by setup_processor()/cpuid_init_hwcaps(). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type function tables; copied from the matched proc_info_list
 * entry in setup_processor() on multi-processor-type builds.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
123
ccea7a19
RK
/*
 * Small per-mode stacks for the re-entrant exception handlers;
 * cpu_init() points the banked IRQ/ABT/UND stack pointers at these.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* from the matched proc_info_list */
static const char *machine_name;	/* from the machine descriptor */
/* Writable copy of boot_command_line, handed back via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;
1da177e4 139
1da177e4
LT
/* Runtime endianness probe: ENDIANNESS is 'l' or 'b' depending on which
 * byte of the word the first array element landed in. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
144
/*
 * Standard memory resources.  start/end of the kernel entries are
 * filled in by request_standard_resources(); video RAM comes from the
 * machine descriptor.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy parallel-port I/O ranges, claimed only when the machine
 * descriptor sets the matching reserve_lp* flag. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
197
1da177e4
LT
/* Architecture names indexed by the CPU_ARCH_* value from cpu_architecture(). */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
217
2ecccf90 218static int __get_cpu_architecture(void)
1da177e4
LT
219{
220 int cpu_arch;
221
0ba8b9b2 222 if ((read_cpuid_id() & 0x0008f000) == 0) {
1da177e4 223 cpu_arch = CPU_ARCH_UNKNOWN;
0ba8b9b2
RK
224 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
225 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
226 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
227 cpu_arch = (read_cpuid_id() >> 16) & 7;
1da177e4
LT
228 if (cpu_arch)
229 cpu_arch += CPU_ARCH_ARMv3;
0ba8b9b2 230 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
180005c4
CM
231 unsigned int mmfr0;
232
233 /* Revised CPUID format. Read the Memory Model Feature
234 * Register 0 and check for VMSAv7 or PMSAv7 */
235 asm("mrc p15, 0, %0, c0, c1, 4"
236 : "=r" (mmfr0));
315cfe78
CM
237 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
238 (mmfr0 & 0x000000f0) >= 0x00000030)
180005c4
CM
239 cpu_arch = CPU_ARCH_ARMv7;
240 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
241 (mmfr0 & 0x000000f0) == 0x00000020)
242 cpu_arch = CPU_ARCH_ARMv6;
243 else
244 cpu_arch = CPU_ARCH_UNKNOWN;
245 } else
246 cpu_arch = CPU_ARCH_UNKNOWN;
1da177e4
LT
247
248 return cpu_arch;
249}
250
2ecccf90
DM
251int __pure cpu_architecture(void)
252{
253 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
254
255 return __cpu_architecture;
256}
257
8925ec4c
WD
/*
 * Work out whether the instruction cache can alias, i.e. whether a
 * cache line's placement depends on virtual address bits above the
 * page size.  Register format depends on the architecture revision.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* CSSELR = 1 selects the instruction cache for CCSIDR reads. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible once one way spans more than a page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
290
c0e95878
RK
/*
 * Classify the data and instruction caches (VIVT, VIPT aliasing or
 * non-aliasing, ASID-tagged, PIPT), record the result in the global
 * 'cacheid', and print it.  Pre-v6 CPUs are assumed VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] encode the I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* v6 format: bit 23 flags an aliasing VIPT cache. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
332
1da177e4
LT
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf-style output usable before the console is initialised.
 * Always goes to printk(); additionally written via the low-level
 * printascii() when CONFIG_DEBUG_LL is enabled.  Messages are
 * truncated to 256 bytes.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
354
8164f7af
SB
/*
 * On ARMv7+, set HWCAP_IDIVA/HWCAP_IDIVT from the divide-instruction
 * field of ID_ISAR0 (bits [27:24]).
 */
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-mode divide implies Thumb-mode divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
371
f159f4ed
TL
/*
 * Clear HWCAP_TLS on ARM1136 r0 cores, which lack a usable TLS register.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only applies to ARM Ltd. ARMv6 parts (implementer 0x41, arch 7). */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
386
ccea7a19
RK
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch through IRQ, ABT and UND modes in turn, setting each
	 * banked stack pointer to the matching slot of this CPU's
	 * 'struct stack', then return to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
445
eb50439b
WD
/* Logical-to-physical CPU id map; index is logical, value is physical. */
int __cpu_logical_map[NR_CPUS];

/*
 * Make the booting CPU logical CPU 0: read our MPIDR affinity level 0
 * (0 on !SMP) and swap that id with slot 0 of the logical map, leaving
 * all other logical CPUs mapped to themselves.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
460
b69874e4
RK
/*
 * Identify the boot CPU from its ID register, bind the matching
 * proc_info function tables (on MULTI_* builds), populate utsname,
 * elf_platform and elf_hwcap, then initialise the cache id and the
 * per-CPU exception stacks.  Hangs forever if the CPU is not in the
 * compiled-in processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	/* No Thumb support: neither Thumb nor the Thumb-only divide apply. */
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
514
93c02ab4 515void __init dump_machine_table(void)
1da177e4 516{
dce72dd0 517 struct machine_desc *p;
1da177e4 518
6291319d
GL
519 early_print("Available machine support:\n\nID (hex)\tNAME\n");
520 for_each_machine_desc(p)
dce72dd0 521 early_print("%08x\t%s\n", p->nr, p->name);
1da177e4 522
dce72dd0 523 early_print("\nPlease check your kernel config and/or bootloader.\n");
1da177e4 524
dce72dd0
NP
525 while (true)
526 /* can't use cpu_relax() here as it may require MMU setup */;
1da177e4
LT
527}
528
a5d5f7da 529int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3a669411 530{
4b5f32ce
NP
531 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
532
533 if (meminfo.nr_banks >= NR_BANKS) {
534 printk(KERN_CRIT "NR_BANKS too low, "
29a38193 535 "ignoring memory at 0x%08llx\n", (long long)start);
4b5f32ce
NP
536 return -EINVAL;
537 }
05f96ef1 538
3a669411
RK
539 /*
540 * Ensure that start/size are aligned to a page boundary.
541 * Size is appropriately rounded down, start is rounded up.
542 */
543 size -= start & ~PAGE_MASK;
05f96ef1 544 bank->start = PAGE_ALIGN(start);
e5ab8580 545
4e1db26a 546#ifndef CONFIG_ARM_LPAE
e5ab8580
WD
547 if (bank->start + size < bank->start) {
548 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
549 "32-bit physical address space\n", (long long)start);
550 /*
551 * To ensure bank->start + bank->size is representable in
552 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
553 * This means we lose a page after masking.
554 */
555 size = ULONG_MAX - bank->start;
556 }
557#endif
558
a5d5f7da 559 bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
4b5f32ce
NP
560
561 /*
562 * Check whether this memory region has non-zero size or
563 * invalid node number.
564 */
be370302 565 if (bank->size == 0)
4b5f32ce
NP
566 return -EINVAL;
567
568 meminfo.nr_banks++;
569 return 0;
3a669411
RK
570}
571
1da177e4
LT
/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.  (Only on the first mem= argument;
	 * later ones accumulate additional banks.)
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* Without an explicit @start, the bank begins at PHYS_OFFSET. */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
1da177e4 603
11b9369c 604static void __init request_standard_resources(struct machine_desc *mdesc)
1da177e4 605{
11b9369c 606 struct memblock_region *region;
1da177e4 607 struct resource *res;
1da177e4 608
37efe642
RK
609 kernel_code.start = virt_to_phys(_text);
610 kernel_code.end = virt_to_phys(_etext - 1);
842eab40 611 kernel_data.start = virt_to_phys(_sdata);
37efe642 612 kernel_data.end = virt_to_phys(_end - 1);
1da177e4 613
11b9369c 614 for_each_memblock(memory, region) {
1da177e4
LT
615 res = alloc_bootmem_low(sizeof(*res));
616 res->name = "System RAM";
11b9369c
DZ
617 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
618 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
1da177e4
LT
619 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
620
621 request_resource(&iomem_resource, res);
622
623 if (kernel_code.start >= res->start &&
624 kernel_code.end <= res->end)
625 request_resource(res, &kernel_code);
626 if (kernel_data.start >= res->start &&
627 kernel_data.end <= res->end)
628 request_resource(res, &kernel_data);
629 }
630
631 if (mdesc->video_start) {
632 video_ram.start = mdesc->video_start;
633 video_ram.end = mdesc->video_end;
634 request_resource(&iomem_resource, &video_ram);
635 }
636
637 /*
638 * Some machines don't have the possibility of ever
639 * possessing lp0, lp1 or lp2
640 */
641 if (mdesc->reserve_lp0)
642 request_resource(&ioport_resource, &lp0);
643 if (mdesc->reserve_lp1)
644 request_resource(&ioport_resource, &lp1);
645 if (mdesc->reserve_lp2)
646 request_resource(&ioport_resource, &lp2);
647}
648
1da177e4
LT
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console description: 80x30 VGA text mode. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
1da177e4 659
1da177e4
LT
/* Run the board's init_machine() hook, if any, at arch_initcall time. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
668
90de4137
SG
/* Run the board's init_late() hook, if any, at late_initcall time. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
676
3c57fb43
MW
#ifdef CONFIG_KEXEC
/* Total low memory in bytes, from the min/max low pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
725
27a3f0e9
NP
726static int __init meminfo_cmp(const void *_a, const void *_b)
727{
728 const struct membank *a = _a, *b = _b;
729 long cmp = bank_pfn_start(a) - bank_pfn_start(b);
730 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
731}
6291319d 732
4588c34d
DM
/*
 * With ARM virtualization extensions enabled, report whether the CPUs
 * entered the kernel in HYP or SVC mode, and warn when the CPUs came
 * up in inconsistent modes (usually a bootloader/firmware problem).
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
747
6291319d
GL
/*
 * Main ARM architecture setup, called from start_kernel().  Identifies
 * the CPU and machine, parses early parameters, initialises the memory
 * layout and paging, registers resources, and runs the board's early
 * init hook.  *cmdline_p is pointed at a writable copy of the boot
 * command line.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened-device-tree board description; fall back to ATAGS. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Memory banks must be sorted before the memblock/paging setup. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP this check runs later, once the secondaries are up. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
815
816
/*
 * Register every possible CPU with the sysfs CPU code; all CPUs are
 * flagged hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
830
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory (entries are added by other code —
 * not visible in this file). */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
843
1da177e4
LT
/* HWCAP_* bit names, indexed by bit number; printed by c_show() on the
 * "Features" line of /proc/cpuinfo.  NULL-terminated. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
866
1da177e4
LT
/*
 * /proc/cpuinfo show routine: one block per online CPU (processor id,
 * model name, BogoMIPS, hwcap features and a decode of the CPUID),
 * followed by the machine Hardware/Revision/Serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		/* Per-CPU loops_per_jiffy on SMP; the global value otherwise. */
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
929
/* seq_file iterator for /proc/cpuinfo: the whole file is produced in a
 * single c_show() call, so start yields one non-NULL token at position
 * 0 and next always terminates the walk. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};