ARM: implement support for read-mostly sections
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / setup.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
1da177e4
LT
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/stddef.h>
13#include <linux/ioport.h>
14#include <linux/delay.h>
15#include <linux/utsname.h>
16#include <linux/initrd.h>
17#include <linux/console.h>
18#include <linux/bootmem.h>
19#include <linux/seq_file.h>
894673ee 20#include <linux/screen_info.h>
1da177e4 21#include <linux/init.h>
3c57fb43 22#include <linux/kexec.h>
cea0bb1b 23#include <linux/crash_dump.h>
1da177e4
LT
24#include <linux/root_dev.h>
25#include <linux/cpu.h>
26#include <linux/interrupt.h>
7bbb7940 27#include <linux/smp.h>
4e950f6f 28#include <linux/fs.h>
e119bfff 29#include <linux/proc_fs.h>
2778f620 30#include <linux/memblock.h>
1da177e4 31
b86040a5 32#include <asm/unified.h>
1da177e4 33#include <asm/cpu.h>
0ba8b9b2 34#include <asm/cputype.h>
1da177e4 35#include <asm/elf.h>
1da177e4 36#include <asm/procinfo.h>
37efe642 37#include <asm/sections.h>
1da177e4 38#include <asm/setup.h>
f00ec48f 39#include <asm/smp_plat.h>
1da177e4
LT
40#include <asm/mach-types.h>
41#include <asm/cacheflush.h>
46097c7d 42#include <asm/cachetype.h>
1da177e4
LT
43#include <asm/tlbflush.h>
44
45#include <asm/mach/arch.h>
46#include <asm/mach/irq.h>
47#include <asm/mach/time.h>
5cbad0eb 48#include <asm/traps.h>
bff595c1 49#include <asm/unwind.h>
1da177e4 50
73a65b3f 51#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
0fc1c832 52#include "compat.h"
73a65b3f 53#endif
4cd9d6f7 54#include "atags.h"
bc581770 55#include "tcm.h"
0fc1c832 56
1da177e4
LT
/* Fallback RAM size used by the default ATAG_MEM tag: 16MB. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Raw "fpe=" command-line argument, consumed by the FP emulator init. */
char fpe_type[8];

/*
 * Record the floating-point-emulator type requested via "fpe=".
 * NOTE(review): copies a fixed 8 bytes from the argument, which may read
 * past the NUL of a shorter argument string — verify callers tolerate this.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* MIDR value captured by head.S at boot. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number handed over by the bootloader (r1). */
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* bitmask, filled in by cacheid_init(). */
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list (r2 from the bootloader), if any. */
unsigned int __atags_pointer __initdata;

/* Board revision / serial number, from ATAG_REVISION / ATAG_SERIAL. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits advertised to userspace via AT_HWCAP and /proc/cpuinfo. */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


/*
 * When a kernel supports multiple CPU/TLB/user/cache implementations,
 * the per-CPU function vectors are copied here by setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

/* Small per-mode stacks for the re-entrant exception handlers. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string, e.g. "v7l"; set by setup_processor(). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Copy of the command line handed to init via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Reads as 'l' on little-endian, 'b' on big-endian. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Aliases into mem_res[]; filled in by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel-port I/O ranges, reserved only on boards that ask. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
188
1da177e4
LT
/* Architecture name strings, indexed by cpu_architecture()'s CPU_ARCH_* value. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
208
1da177e4
LT
/*
 * Decode the architecture version (CPU_ARCH_*) from the MIDR.
 * Pre-CPUID cores, ARM7-style IDs, old-format IDs and the revised
 * CPUID scheme each encode the architecture differently.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 parts carry no architecture information */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes 4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* old-format MIDR: architecture field in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
241
8925ec4c
WD
/*
 * Decide whether the L1 I-cache can alias: true when one cache way
 * (line_size * num_sets) is larger than PAGE_SIZE, so a virtual
 * address can land in more than one cache location.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache in CSSELR (level 0, InD=1) */
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		/* CCSIDR encodes log2(words per line) - 2 and sets - 1 */
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
270
c0e95878
RK
/*
 * Work out the cache policy (VIVT / VIPT aliasing / VIPT non-aliasing /
 * ASID-tagged) from the cache type register and record it in 'cacheid'.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field == 0b01: ASID-tagged VIVT I-cache */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6 cores are all virtually indexed, virtually tagged */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
304
1da177e4
LT
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

/*
 * Clear HWCAP_TLS on ARM1136 r0 cores, which lack the TLS register.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only ARM Ltd (0x41) parts with architecture field 0x7 qualify */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
326
1da177e4
LT
/*
 * Identify the boot CPU, install its proc/tlb/user/cache function
 * vectors, set elf_hwcap and the utsname machine string, then run the
 * per-processor init.  Loops forever if the CPU is not supported.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* Thumb support compiled out: don't advertise it to userspace */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
374
ccea7a19
RK
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Each msr switches into IRQ/ABT/UND mode (interrupts masked) so
	 * the banked sp for that mode can be pointed at this CPU's stack
	 * slot; the final msr returns to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
425
1da177e4
LT
/*
 * Look up the machine descriptor for machine number 'nr' (built by the
 * linker from the MACHINE_START entries).  Loops forever if the kernel
 * was not built with support for this machine.
 */
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
444
4b5f32ce 445static int __init arm_add_memory(unsigned long start, unsigned long size)
3a669411 446{
4b5f32ce
NP
447 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
448
449 if (meminfo.nr_banks >= NR_BANKS) {
450 printk(KERN_CRIT "NR_BANKS too low, "
451 "ignoring memory at %#lx\n", start);
452 return -EINVAL;
453 }
05f96ef1 454
3a669411
RK
455 /*
456 * Ensure that start/size are aligned to a page boundary.
457 * Size is appropriately rounded down, start is rounded up.
458 */
459 size -= start & ~PAGE_MASK;
05f96ef1
RK
460 bank->start = PAGE_ALIGN(start);
461 bank->size = size & PAGE_MASK;
4b5f32ce
NP
462
463 /*
464 * Check whether this memory region has non-zero size or
465 * invalid node number.
466 */
be370302 467 if (bank->size == 0)
4b5f32ce
NP
468 return -EINVAL;
469
470 meminfo.nr_banks++;
471 return 0;
3a669411
RK
472}
473
1da177e4
LT
/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	/* default base when no "@start" is given */
	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
1da177e4
LT
504
505static void __init
506setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
507{
508#ifdef CONFIG_BLK_DEV_RAM
509 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
510
511 rd_image_start = image_start;
512 rd_prompt = prompt;
513 rd_doload = doload;
514
515 if (rd_sz)
516 rd_size = rd_sz;
517#endif
518}
519
/*
 * Register the standard /proc/iomem and /proc/ioport resources:
 * one "System RAM" region per memory bank (with kernel text/data
 * nested inside the bank that contains them), optional video RAM,
 * and the legacy lp0/lp1/lp2 port ranges the board reserves.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image resources inside their RAM bank */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
568
569/*
570 * Tag parsing.
571 *
572 * This is the new way of passing data to the kernel at boot time. Rather
573 * than passing a fixed inflexible structure to the kernel, we pass a list
574 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
575 * tag for the list to be recognised (to distinguish the tagged list from
576 * a param_struct). The list is terminated with a zero-length tag (this tag
577 * is not parsed in any way).
578 */
579static int __init parse_tag_core(const struct tag *tag)
580{
581 if (tag->hdr.size > 2) {
582 if ((tag->u.core.flags & 1) == 0)
583 root_mountflags &= ~MS_RDONLY;
584 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
585 }
586 return 0;
587}
588
589__tagtable(ATAG_CORE, parse_tag_core);
590
/* ATAG_MEM: register one RAM bank with meminfo. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
597
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Defaults for the text console; overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text-console geometry. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
624
625static int __init parse_tag_ramdisk(const struct tag *tag)
626{
627 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
628 (tag->u.ramdisk.flags & 2) == 0,
629 tag->u.ramdisk.start, tag->u.ramdisk.size);
630 return 0;
631}
632
633__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
634
1da177e4
LT
635static int __init parse_tag_serialnr(const struct tag *tag)
636{
637 system_serial_low = tag->u.serialnr.low;
638 system_serial_high = tag->u.serialnr.high;
639 return 0;
640}
641
642__tagtable(ATAG_SERIAL, parse_tag_serialnr);
643
/* ATAG_REVISION: record the board revision (shown in /proc/cpuinfo). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
651
92d2040d 652#ifndef CONFIG_CMDLINE_FORCE
1da177e4
LT
653static int __init parse_tag_cmdline(const struct tag *tag)
654{
655 strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
656 return 0;
657}
658
659__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
92d2040d 660#endif /* CONFIG_CMDLINE_FORCE */
1da177e4
LT
661
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* true iff the loop broke out, i.e. some table entry claimed the tag */
	return t < &__tagtable_end;
}
680
681/*
682 * Parse all tags in the list, checking both the global and architecture
683 * specific tag tables.
684 */
685static void __init parse_tags(const struct tag *t)
686{
687 for (; t->hdr.size; t = tag_next(t))
688 if (!parse_tag(t))
689 printk(KERN_WARNING
690 "Ignoring unrecognised tag 0x%08x\n",
691 t->hdr.tag);
692}
693
/*
 * This holds our defaults: an ATAG_CORE, a single MEM_SIZE bank at
 * PHYS_OFFSET, and the terminating ATAG_NONE.  Used when the
 * bootloader supplies no (valid) tag list.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
710
/* Board init hook; set from mdesc->init_machine in setup_arch(). */
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
721
3c57fb43
MW
#ifdef CONFIG_KEXEC
/* Total low memory in bytes, from the bootmem pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves a memory area for the crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
770
cea0bb1b
MW
/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	/* succeed only if memparse consumed at least one character */
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
794
73a65b3f
UKK
795static void __init squash_mem_tags(struct tag *tag)
796{
797 for (; tag->hdr.size; tag = tag_next(tag))
798 if (tag->hdr.tag == ATAG_MEM)
799 tag->hdr.tag = ATAG_NONE;
800}
801
1da177e4
LT
/*
 * Main architecture-specific boot entry point: identify CPU and machine,
 * locate and parse the ATAG list (or convert an old param_struct),
 * assemble the command line, initialise memblock/paging/resources, and
 * stash the machine descriptor's hooks for later initcalls.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* bootloader-provided tags (r2) win over the machine default */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still not a valid tag list: fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup populated meminfo: drop the (duplicate) mem tags */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
888
889
/* Register every possible CPU with the sysfs CPU subsystem. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		/* all ARM CPUs are reported hotpluggable here */
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
903
e119bfff
RK
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory that other code populates. */
static int __init proc_cpu_init(void)
{
	if (!proc_mkdir("cpu", NULL))
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
916
1da177e4
LT
/*
 * Feature names printed by c_show(); index i corresponds to elf_hwcap
 * bit i, so the order here must track the HWCAP_* bit assignments.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
935
1da177e4
LT
/* seq_file show handler for /proc/cpuinfo. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* same MIDR-format discrimination as cpu_architecture() */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
998
999static void *c_start(struct seq_file *m, loff_t *pos)
1000{
1001 return *pos < 1 ? (void *)1 : NULL;
1002}
1003
1004static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1005{
1006 ++*pos;
1007 return NULL;
1008}
1009
1010static void c_stop(struct seq_file *m, void *v)
1011{
1012}
1013
2ffd6e18 1014const struct seq_operations cpuinfo_op = {
1da177e4
LT
1015 .start = c_start,
1016 .next = c_next,
1017 .stop = c_stop,
1018 .show = c_show
1019};