1/*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/config.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/stddef.h>
14#include <linux/ioport.h>
15#include <linux/delay.h>
16#include <linux/utsname.h>
17#include <linux/initrd.h>
18#include <linux/console.h>
19#include <linux/bootmem.h>
20#include <linux/seq_file.h>
21#include <linux/tty.h>
22#include <linux/init.h>
23#include <linux/root_dev.h>
24#include <linux/cpu.h>
25#include <linux/interrupt.h>
26
27#include <asm/cpu.h>
28#include <asm/elf.h>
29#include <asm/hardware.h>
30#include <asm/io.h>
31#include <asm/procinfo.h>
32#include <asm/setup.h>
33#include <asm/mach-types.h>
34#include <asm/cacheflush.h>
35#include <asm/tlbflush.h>
36
37#include <asm/mach/arch.h>
38#include <asm/mach/irq.h>
39#include <asm/mach/time.h>
40
41#ifndef MEM_SIZE
42#define MEM_SIZE (16*1024*1024)
43#endif
44
45#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
46char fpe_type[8];
47
48static int __init fpe_setup(char *line)
49{
50 memcpy(fpe_type, line, 8);
51 return 1;
52}
53
54__setup("fpe=", fpe_setup);
55#endif
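/*
 * Example: booting with "fpe=nwfpe" stashes the value here so that the
 * floating point emulator can check, at its own init time, whether it is
 * the emulator the user asked for; only the first 8 bytes of the value
 * are kept.
 */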
56
57extern unsigned int mem_fclk_21285;
58extern void paging_init(struct meminfo *, struct machine_desc *desc);
59extern void convert_to_tag_list(struct tag *tags);
60extern void squash_mem_tags(struct tag *tag);
61extern void reboot_setup(char *str);
62extern int root_mountflags;
63extern void _stext, _text, _etext, __data_start, _edata, _end;
64
65unsigned int processor_id;
66unsigned int __machine_arch_type;
67EXPORT_SYMBOL(__machine_arch_type);
68
69unsigned int system_rev;
70EXPORT_SYMBOL(system_rev);
71
72unsigned int system_serial_low;
73EXPORT_SYMBOL(system_serial_low);
74
75unsigned int system_serial_high;
76EXPORT_SYMBOL(system_serial_high);
77
78unsigned int elf_hwcap;
79EXPORT_SYMBOL(elf_hwcap);
80
81
82#ifdef MULTI_CPU
83struct processor processor;
84#endif
85#ifdef MULTI_TLB
86struct cpu_tlb_fns cpu_tlb;
87#endif
88#ifdef MULTI_USER
89struct cpu_user_fns cpu_user;
90#endif
91#ifdef MULTI_CACHE
92struct cpu_cache_fns cpu_cache;
93#endif
94
95struct stack {
96 u32 irq[3];
97 u32 abt[3];
98 u32 und[3];
99} ____cacheline_aligned;
100
101static struct stack stacks[NR_CPUS];
102
103char elf_platform[ELF_PLATFORM_SIZE];
104EXPORT_SYMBOL(elf_platform);
105
106unsigned long phys_initrd_start __initdata = 0;
107unsigned long phys_initrd_size __initdata = 0;
108
109static struct meminfo meminfo __initdata = { 0, };
110static const char *cpu_name;
111static const char *machine_name;
112static char command_line[COMMAND_LINE_SIZE];
113
114static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
115static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
116#define ENDIANNESS ((char)endian_test.l)
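/*
 * The union/macro pair above is an endianness probe: c[] and l share
 * storage, so the least significant byte of l is 'l' on a little-endian
 * CPU and 'b' on a big-endian one, and that single character is appended
 * to the utsname machine string and elf_platform in setup_processor().
 * A stand-alone sketch of the same trick (user-space, illustrative only,
 * assuming a 32-bit unsigned long as on ARM):
 */
#if 0
#include <stdio.h>

static union { char c[4]; unsigned long l; } probe = { { 'l', '?', '?', 'b' } };

int main(void)
{
	/* prints "l" on little-endian machines, "b" on big-endian ones */
	printf("%c\n", (char)probe.l);
	return 0;
}
#endif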
117
118DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
119
120/*
121 * Standard memory resources
122 */
123static struct resource mem_res[] = {
124 { "Video RAM", 0, 0, IORESOURCE_MEM },
125 { "Kernel text", 0, 0, IORESOURCE_MEM },
126 { "Kernel data", 0, 0, IORESOURCE_MEM }
127};
128
129#define video_ram mem_res[0]
130#define kernel_code mem_res[1]
131#define kernel_data mem_res[2]
132
133static struct resource io_res[] = {
134 { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
135 { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
136 { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
137};
138
139#define lp0 io_res[0]
140#define lp1 io_res[1]
141#define lp2 io_res[2]
142
143static const char *cache_types[16] = {
144 "write-through",
145 "write-back",
146 "write-back",
147 "undefined 3",
148 "undefined 4",
149 "undefined 5",
150 "write-back",
151 "write-back",
152 "undefined 8",
153 "undefined 9",
154 "undefined 10",
155 "undefined 11",
156 "undefined 12",
157 "undefined 13",
158 "write-back",
159 "undefined 15",
160};
161
162static const char *cache_clean[16] = {
163 "not required",
164 "read-block",
165 "cp15 c7 ops",
166 "undefined 3",
167 "undefined 4",
168 "undefined 5",
169 "cp15 c7 ops",
170 "cp15 c7 ops",
171 "undefined 8",
172 "undefined 9",
173 "undefined 10",
174 "undefined 11",
175 "undefined 12",
176 "undefined 13",
177 "cp15 c7 ops",
178 "undefined 15",
179};
180
181static const char *cache_lockdown[16] = {
182 "not supported",
183 "not supported",
184 "not supported",
185 "undefined 3",
186 "undefined 4",
187 "undefined 5",
188 "format A",
189 "format B",
190 "undefined 8",
191 "undefined 9",
192 "undefined 10",
193 "undefined 11",
194 "undefined 12",
195 "undefined 13",
196 "format C",
197 "undefined 15",
198};
199
200static const char *proc_arch[] = {
201 "undefined/unknown",
202 "3",
203 "4",
204 "4T",
205 "5",
206 "5T",
207 "5TE",
208 "5TEJ",
209 "6TEJ",
210 "?(10)",
211 "?(11)",
212 "?(12)",
213 "?(13)",
214 "?(14)",
215 "?(15)",
216 "?(16)",
217 "?(17)",
218};
219
220#define CACHE_TYPE(x) (((x) >> 25) & 15)
221#define CACHE_S(x) ((x) & (1 << 24))
222#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */
223#define CACHE_ISIZE(x) ((x) & 4095)
224
225#define CACHE_SIZE(y) (((y) >> 6) & 7)
226#define CACHE_ASSOC(y) (((y) >> 3) & 7)
227#define CACHE_M(y) ((y) & (1 << 2))
228#define CACHE_LINE(y) ((y) & 3)
229
230static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
231{
232 unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
233
234 printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
235 cpu, prefix,
236 mult << (8 + CACHE_SIZE(cache)),
237 (mult << CACHE_ASSOC(cache)) >> 1,
238 8 << CACHE_LINE(cache),
239 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
240 CACHE_LINE(cache)));
241}
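/*
 * Worked example (register value purely illustrative): for a cache field
 * of 0x152 the macros above give CACHE_SIZE = 5, CACHE_ASSOC = 2,
 * CACHE_M = 0 and CACHE_LINE = 2, so mult = 2 and dump_cache() reports
 * 2 << (8 + 5) = 16384 bytes, (2 << 2) >> 1 = 4-way associativity,
 * 8 << 2 = 32 byte lines and 1 << (6 + 5 - 2 - 2) = 128 sets, which is
 * self-consistent: 128 sets * 4 ways * 32 bytes = 16384 bytes.
 */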
242
243static void __init dump_cpu_info(int cpu)
244{
245 unsigned int info = read_cpuid(CPUID_CACHETYPE);
246
247 if (info != processor_id) {
248 printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
249 cache_types[CACHE_TYPE(info)]);
250 if (CACHE_S(info)) {
251 dump_cache("I cache", cpu, CACHE_ISIZE(info));
252 dump_cache("D cache", cpu, CACHE_DSIZE(info));
253 } else {
254 dump_cache("cache", cpu, CACHE_ISIZE(info));
255 }
256 }
257}
258
259int cpu_architecture(void)
260{
261 int cpu_arch;
262
263 if ((processor_id & 0x0000f000) == 0) {
264 cpu_arch = CPU_ARCH_UNKNOWN;
265 } else if ((processor_id & 0x0000f000) == 0x00007000) {
266 cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
267 } else {
268 cpu_arch = (processor_id >> 16) & 7;
269 if (cpu_arch)
270 cpu_arch += CPU_ARCH_ARMv3;
271 }
272
273 return cpu_arch;
274}
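/*
 * Decode examples (ID values illustrative): bits[15:12] == 0 means the
 * part predates the ID scheme and is reported as CPU_ARCH_UNKNOWN; an
 * ARM7-style ID (bits[15:12] == 7) is reported as ARMv4T when bit 23 is
 * set and ARMv3 otherwise; anything newer carries the architecture
 * directly in bits[18:16], e.g. a value of 5 there yields
 * CPU_ARCH_ARMv3 + 5, i.e. CPU_ARCH_ARMv5TE with the usual ordering of
 * the CPU_ARCH_* constants.
 */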
275
276/*
277 * These functions re-use the assembly code in head.S, which
278 * already provides the required functionality.
279 */
280extern struct proc_info_list *lookup_processor_type(void);
281extern struct machine_desc *lookup_machine_type(unsigned int);
282
283static void __init setup_processor(void)
284{
285 struct proc_info_list *list;
286
287 /*
288 * locate processor in the list of supported processor
289 * types. The linker builds this table for us from the
290 * entries in arch/arm/mm/proc-*.S
291 */
292 list = lookup_processor_type();
293 if (!list) {
294 printk("CPU configuration botched (ID %08x), unable "
295 "to continue.\n", processor_id);
296 while (1);
297 }
298
299 cpu_name = list->cpu_name;
300
301#ifdef MULTI_CPU
302 processor = *list->proc;
303#endif
304#ifdef MULTI_TLB
305 cpu_tlb = *list->tlb;
306#endif
307#ifdef MULTI_USER
308 cpu_user = *list->user;
309#endif
310#ifdef MULTI_CACHE
311 cpu_cache = *list->cache;
312#endif
313
314 printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
315 cpu_name, processor_id, (int)processor_id & 15,
316 proc_arch[cpu_architecture()]);
317
318 sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
319 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
320 elf_hwcap = list->elf_hwcap;
321
322 cpu_proc_init();
323}
324
325/*
326 * cpu_init - initialise one CPU.
327 *
328 * cpu_init dumps the cache information, initialises SMP specific
329 * information, and sets up the per-CPU stacks.
330 */
331void cpu_init(void)
332{
333 unsigned int cpu = smp_processor_id();
334 struct stack *stk = &stacks[cpu];
335
336 if (cpu >= NR_CPUS) {
337 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
338 BUG();
339 }
340
341 dump_cpu_info(cpu);
342
343 /*
344 * setup stacks for re-entrant exception handlers
345 */
346 __asm__ (
347 "msr cpsr_c, %1\n\t"
348 "add sp, %0, %2\n\t"
349 "msr cpsr_c, %3\n\t"
350 "add sp, %0, %4\n\t"
351 "msr cpsr_c, %5\n\t"
352 "add sp, %0, %6\n\t"
353 "msr cpsr_c, %7"
354 :
355 : "r" (stk),
356 "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
357 "I" (offsetof(struct stack, irq[0])),
358 "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
359 "I" (offsetof(struct stack, abt[0])),
360 "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
361 "I" (offsetof(struct stack, und[0])),
362 "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
363 : "r14");
364}
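/*
 * The asm above gives each exception mode its own small save area: with
 * IRQs and FIQs masked it switches the CPSR into IRQ, ABT and UND mode in
 * turn, points the banked sp at the matching three-word field of
 * stacks[cpu] (offsets 0, 12 and 24), and finally returns to SVC mode.
 * Three words per mode suffice because the exception entry code only
 * stashes a few registers there before continuing on the SVC stack.
 */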
365
366static struct machine_desc * __init setup_machine(unsigned int nr)
367{
368 struct machine_desc *list;
369
370 /*
371 * locate machine in the list of supported machines.
372 */
373 list = lookup_machine_type(nr);
374 if (!list) {
375 printk("Machine configuration botched (nr %d), unable "
376 "to continue.\n", nr);
377 while (1);
378 }
379
380 printk("Machine: %s\n", list->name);
381
382 return list;
383}
384
385static void __init early_initrd(char **p)
386{
387 unsigned long start, size;
388
389 start = memparse(*p, p);
390 if (**p == ',') {
391 size = memparse((*p) + 1, p);
392
393 phys_initrd_start = start;
394 phys_initrd_size = size;
395 }
396}
397__early_param("initrd=", early_initrd);
398
399static void __init add_memory(unsigned long start, unsigned long size)
400{
401 /*
402 * Ensure that start/size are aligned to a page boundary.
403 * Size is appropriately rounded down, start is rounded up.
404 */
405 size -= start & ~PAGE_MASK;
406
407 meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
408 meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK;
409 meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
410 meminfo.nr_banks += 1;
411}
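/*
 * Worked example: add_memory(0x10000100, 0x00100000) with 4K pages first
 * trims the size by the 0x100 misalignment (size = 0x000fff00), then
 * records bank.start = PAGE_ALIGN(0x10000100) = 0x10001000 and
 * bank.size = 0x000fff00 & PAGE_MASK = 0x000ff000, i.e. only whole pages
 * that really lie inside the region passed in.
 */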
412
413/*
414 * Pick out the memory size. We look for mem=size@start,
415 * where start and size are "size[KkMm]"
416 */
417static void __init early_mem(char **p)
418{
419 static int usermem __initdata = 0;
420 unsigned long size, start;
421
422 /*
423 * If the user specifies memory size, we
424 * blow away any automatically generated
425 * size.
426 */
427 if (usermem == 0) {
428 usermem = 1;
429 meminfo.nr_banks = 0;
430 }
431
432 start = PHYS_OFFSET;
433 size = memparse(*p, p);
434 if (**p == '@')
435 start = memparse(*p + 1, p);
436
437 add_memory(start, size);
438}
439__early_param("mem=", early_mem);
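/*
 * Example: "mem=64M@0x20000000" registers a 64MB bank starting at
 * physical address 0x20000000, while a bare "mem=64M" places the bank at
 * PHYS_OFFSET.  memparse() accepts the usual K/M (and G) suffixes, and
 * because usermem is set on the first occurrence, banks discovered from
 * the boot tags are discarded as soon as one mem= option is seen; several
 * mem= options may be given to describe discontiguous memory.
 */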
440
441/*
442 * Initial parsing of the command line.
443 */
444static void __init parse_cmdline(char **cmdline_p, char *from)
445{
446 char c = ' ', *to = command_line;
447 int len = 0;
448
449 for (;;) {
450 if (c == ' ') {
451 extern struct early_params __early_begin, __early_end;
452 struct early_params *p;
453
454 for (p = &__early_begin; p < &__early_end; p++) {
455 int len = strlen(p->arg);
456
457 if (memcmp(from, p->arg, len) == 0) {
458 if (to != command_line)
459 to -= 1;
460 from += len;
461 p->fn(&from);
462
463 while (*from != ' ' && *from != '\0')
464 from++;
465 break;
466 }
467 }
468 }
469 c = *from++;
470 if (!c)
471 break;
472 if (COMMAND_LINE_SIZE <= ++len)
473 break;
474 *to++ = c;
475 }
476 *to = '\0';
477 *cmdline_p = command_line;
478}
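/*
 * The scanner above hands any option matching a registered early
 * parameter (the __early_param entries collected by the linker between
 * __early_begin and __early_end) to its handler and strips it from the
 * copy being built in command_line[]; everything else is copied through
 * verbatim for the generic parser.  For example, given
 * "console=ttyS0 mem=64M root=/dev/nfs", early_mem() consumes "mem=64M"
 * and only "console=ttyS0 root=/dev/nfs" ends up in *cmdline_p.
 */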
479
480static void __init
481setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
482{
483#ifdef CONFIG_BLK_DEV_RAM
484 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
485
486 rd_image_start = image_start;
487 rd_prompt = prompt;
488 rd_doload = doload;
489
490 if (rd_sz)
491 rd_size = rd_sz;
492#endif
493}
494
495static void __init
496request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
497{
498 struct resource *res;
499 int i;
500
501 kernel_code.start = virt_to_phys(&_text);
502 kernel_code.end = virt_to_phys(&_etext - 1);
503 kernel_data.start = virt_to_phys(&__data_start);
504 kernel_data.end = virt_to_phys(&_end - 1);
505
506 for (i = 0; i < mi->nr_banks; i++) {
507 unsigned long virt_start, virt_end;
508
509 if (mi->bank[i].size == 0)
510 continue;
511
512 virt_start = __phys_to_virt(mi->bank[i].start);
513 virt_end = virt_start + mi->bank[i].size - 1;
514
515 res = alloc_bootmem_low(sizeof(*res));
516 res->name = "System RAM";
517 res->start = __virt_to_phys(virt_start);
518 res->end = __virt_to_phys(virt_end);
519 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
520
521 request_resource(&iomem_resource, res);
522
523 if (kernel_code.start >= res->start &&
524 kernel_code.end <= res->end)
525 request_resource(res, &kernel_code);
526 if (kernel_data.start >= res->start &&
527 kernel_data.end <= res->end)
528 request_resource(res, &kernel_data);
529 }
530
531 if (mdesc->video_start) {
532 video_ram.start = mdesc->video_start;
533 video_ram.end = mdesc->video_end;
534 request_resource(&iomem_resource, &video_ram);
535 }
536
537 /*
538 * Some machines can never have an lp0, lp1 or lp2
539 * port, so only reserve them when asked to
540 */
541 if (mdesc->reserve_lp0)
542 request_resource(&ioport_resource, &lp0);
543 if (mdesc->reserve_lp1)
544 request_resource(&ioport_resource, &lp1);
545 if (mdesc->reserve_lp2)
546 request_resource(&ioport_resource, &lp2);
547}
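/*
 * The resulting /proc/iomem layout is roughly (addresses depend on the
 * platform and kernel layout):
 *
 *	<bank start>-<bank end> : System RAM
 *	  <text start>-<text end> : Kernel text
 *	  <data start>-<data end> : Kernel data
 *
 * i.e. each RAM bank becomes a busy "System RAM" resource and the kernel
 * text/data resources nest under whichever bank contains them; Video RAM
 * and the legacy lp* port ranges are only claimed when the machine
 * description asks for them.
 */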
548
549/*
550 * Tag parsing.
551 *
552 * This is the new way of passing data to the kernel at boot time. Rather
553 * than passing a fixed inflexible structure to the kernel, we pass a list
554 * of variable-sized tags to the kernel. The first tag must be an ATAG_CORE
555 * tag for the list to be recognised (to distinguish the tagged list from
556 * a param_struct). The list is terminated with a zero-length tag (this tag
557 * is not parsed in any way).
558 */
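/*
 * hdr.size is counted in 32-bit words and includes the header itself, so
 * walking a bootloader-supplied list is plain pointer arithmetic; the
 * tag_next() helper used further down is essentially the expression in
 * this illustrative sketch:
 */
#if 0
static void __init dump_atags(const struct tag *t)
{
	/* a valid list starts with ATAG_CORE and ends with a zero-sized tag */
	if (t->hdr.tag != ATAG_CORE)
		return;

	for (; t->hdr.size; t = (const struct tag *)((const u32 *)t + t->hdr.size))
		printk(KERN_DEBUG "tag %08x, %u words\n", t->hdr.tag, t->hdr.size);
}
#endif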
559static int __init parse_tag_core(const struct tag *tag)
560{
561 if (tag->hdr.size > 2) {
562 if ((tag->u.core.flags & 1) == 0)
563 root_mountflags &= ~MS_RDONLY;
564 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
565 }
566 return 0;
567}
568
569__tagtable(ATAG_CORE, parse_tag_core);
570
571static int __init parse_tag_mem32(const struct tag *tag)
572{
573 if (meminfo.nr_banks >= NR_BANKS) {
574 printk(KERN_WARNING
575 "Ignoring memory bank 0x%08x size %dKB\n",
576 tag->u.mem.start, tag->u.mem.size / 1024);
577 return -EINVAL;
578 }
579 add_memory(tag->u.mem.start, tag->u.mem.size);
580 return 0;
581}
582
583__tagtable(ATAG_MEM, parse_tag_mem32);
584
585#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
586struct screen_info screen_info = {
587 .orig_video_lines = 30,
588 .orig_video_cols = 80,
589 .orig_video_mode = 0,
590 .orig_video_ega_bx = 0,
591 .orig_video_isVGA = 1,
592 .orig_video_points = 8
593};
594
595static int __init parse_tag_videotext(const struct tag *tag)
596{
597 screen_info.orig_x = tag->u.videotext.x;
598 screen_info.orig_y = tag->u.videotext.y;
599 screen_info.orig_video_page = tag->u.videotext.video_page;
600 screen_info.orig_video_mode = tag->u.videotext.video_mode;
601 screen_info.orig_video_cols = tag->u.videotext.video_cols;
602 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
603 screen_info.orig_video_lines = tag->u.videotext.video_lines;
604 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
605 screen_info.orig_video_points = tag->u.videotext.video_points;
606 return 0;
607}
608
609__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
610#endif
611
612static int __init parse_tag_ramdisk(const struct tag *tag)
613{
614 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
615 (tag->u.ramdisk.flags & 2) == 0,
616 tag->u.ramdisk.start, tag->u.ramdisk.size);
617 return 0;
618}
619
620__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
621
622static int __init parse_tag_initrd(const struct tag *tag)
623{
624 printk(KERN_WARNING "ATAG_INITRD is deprecated; "
625 "please update your bootloader.\n");
626 phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
627 phys_initrd_size = tag->u.initrd.size;
628 return 0;
629}
630
631__tagtable(ATAG_INITRD, parse_tag_initrd);
632
633static int __init parse_tag_initrd2(const struct tag *tag)
634{
635 phys_initrd_start = tag->u.initrd.start;
636 phys_initrd_size = tag->u.initrd.size;
637 return 0;
638}
639
640__tagtable(ATAG_INITRD2, parse_tag_initrd2);
641
642static int __init parse_tag_serialnr(const struct tag *tag)
643{
644 system_serial_low = tag->u.serialnr.low;
645 system_serial_high = tag->u.serialnr.high;
646 return 0;
647}
648
649__tagtable(ATAG_SERIAL, parse_tag_serialnr);
650
651static int __init parse_tag_revision(const struct tag *tag)
652{
653 system_rev = tag->u.revision.rev;
654 return 0;
655}
656
657__tagtable(ATAG_REVISION, parse_tag_revision);
658
659static int __init parse_tag_cmdline(const struct tag *tag)
660{
661 strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
662 return 0;
663}
664
665__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
666
667/*
668 * Scan the tag table for this tag, and call its parse function.
669 * The tag table is built by the linker from all the __tagtable
670 * declarations.
671 */
672static int __init parse_tag(const struct tag *tag)
673{
674 extern struct tagtable __tagtable_begin, __tagtable_end;
675 struct tagtable *t;
676
677 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
678 if (tag->hdr.tag == t->tag) {
679 t->parse(tag);
680 break;
681 }
682
683 return t < &__tagtable_end;
684}
685
686/*
687 * Parse all tags in the list, checking both the global and architecture
688 * specific tag tables.
689 */
690static void __init parse_tags(const struct tag *t)
691{
692 for (; t->hdr.size; t = tag_next(t))
693 if (!parse_tag(t))
694 printk(KERN_WARNING
695 "Ignoring unrecognised tag 0x%08x\n",
696 t->hdr.tag);
697}
698
699/*
700 * This holds our defaults.
701 */
702static struct init_tags {
703 struct tag_header hdr1;
704 struct tag_core core;
705 struct tag_header hdr2;
706 struct tag_mem32 mem;
707 struct tag_header hdr3;
708} init_tags __initdata = {
709 { tag_size(tag_core), ATAG_CORE },
710 { 1, PAGE_SIZE, 0xff },
711 { tag_size(tag_mem32), ATAG_MEM },
712 { MEM_SIZE, PHYS_OFFSET },
713 { 0, ATAG_NONE }
714};
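/*
 * This is a minimal, well-formed fallback list: an ATAG_CORE (flags = 1,
 * so the root stays read-only per parse_tag_core(), with what appear to
 * be the page size and root device fields set to PAGE_SIZE and 0xff), a
 * single ATAG_MEM describing MEM_SIZE bytes at PHYS_OFFSET, and the
 * zero-sized terminator.  setup_arch() only falls back to it when the
 * boot loader supplies nothing recognisable.
 */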
715
716static void (*init_machine)(void) __initdata;
717
718static int __init customize_machine(void)
719{
720 /* customizes platform devices, or adds new ones */
721 if (init_machine)
722 init_machine();
723 return 0;
724}
725arch_initcall(customize_machine);
726
727void __init setup_arch(char **cmdline_p)
728{
729 struct tag *tags = (struct tag *)&init_tags;
730 struct machine_desc *mdesc;
731 char *from = default_command_line;
732
733 setup_processor();
734 mdesc = setup_machine(machine_arch_type);
735 machine_name = mdesc->name;
736
737 if (mdesc->soft_reboot)
738 reboot_setup("s");
739
740 if (mdesc->param_offset)
741 tags = phys_to_virt(mdesc->param_offset);
742
743 /*
744 * If we have the old style parameters, convert them to
745 * a tag list.
746 */
747 if (tags->hdr.tag != ATAG_CORE)
748 convert_to_tag_list(tags);
749 if (tags->hdr.tag != ATAG_CORE)
750 tags = (struct tag *)&init_tags;
751
752 if (mdesc->fixup)
753 mdesc->fixup(mdesc, tags, &from, &meminfo);
754
755 if (tags->hdr.tag == ATAG_CORE) {
756 if (meminfo.nr_banks != 0)
757 squash_mem_tags(tags);
758 parse_tags(tags);
759 }
760
761 init_mm.start_code = (unsigned long) &_text;
762 init_mm.end_code = (unsigned long) &_etext;
763 init_mm.end_data = (unsigned long) &_edata;
764 init_mm.brk = (unsigned long) &_end;
765
766 memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
767 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
768 parse_cmdline(cmdline_p, from);
769 paging_init(&meminfo, mdesc);
770 request_standard_resources(&meminfo, mdesc);
771
772 cpu_init();
773
774 /*
775 * Set up various architecture-specific pointers
776 */
777 init_arch_irq = mdesc->init_irq;
778 system_timer = mdesc->timer;
779 init_machine = mdesc->init_machine;
780
781#ifdef CONFIG_VT
782#if defined(CONFIG_VGA_CONSOLE)
783 conswitchp = &vga_con;
784#elif defined(CONFIG_DUMMY_CONSOLE)
785 conswitchp = &dummy_con;
786#endif
787#endif
788}
789
790
791static int __init topology_init(void)
792{
793 int cpu;
794
795 for_each_cpu(cpu)
796 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
797
798 return 0;
799}
800
801subsys_initcall(topology_init);
802
803static const char *hwcap_str[] = {
804 "swp",
805 "half",
806 "thumb",
807 "26bit",
808 "fastmult",
809 "fpa",
810 "vfp",
811 "edsp",
812 "java",
813 NULL
814};
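/*
 * The bit positions of elf_hwcap map 1:1 onto this table, so for example
 * elf_hwcap == 0x97 (bits 0, 1, 2, 4 and 7) is printed by c_show() below
 * as "swp half thumb fastmult edsp".
 */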
815
816static void
817c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
818{
819 unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
820
821 seq_printf(m, "%s size\t\t: %d\n"
822 "%s assoc\t\t: %d\n"
823 "%s line length\t: %d\n"
824 "%s sets\t\t: %d\n",
825 type, mult << (8 + CACHE_SIZE(cache)),
826 type, (mult << CACHE_ASSOC(cache)) >> 1,
827 type, 8 << CACHE_LINE(cache),
828 type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
829 CACHE_LINE(cache)));
830}
831
832static int c_show(struct seq_file *m, void *v)
833{
834 int i;
835
836 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
837 cpu_name, (int)processor_id & 15, elf_platform);
838
839#if defined(CONFIG_SMP)
840 for_each_online_cpu(i) {
841 seq_printf(m, "Processor\t: %d\n", i);
842 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
843 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
844 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
845 }
846#else /* CONFIG_SMP */
847 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
848 loops_per_jiffy / (500000/HZ),
849 (loops_per_jiffy / (5000/HZ)) % 100);
850#endif
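/*
 * The two divisions print loops_per_jiffy as a BogoMIPS value with two
 * decimal places: BogoMIPS = loops_per_jiffy * HZ / 500000.  For example,
 * loops_per_jiffy = 249856 at HZ = 100 gives 249856 / 5000 = 49 and
 * (249856 / 50) % 100 = 97, i.e. "49.97".
 */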
851
852 /* dump out the processor features */
853 seq_puts(m, "Features\t: ");
854
855 for (i = 0; hwcap_str[i]; i++)
856 if (elf_hwcap & (1 << i))
857 seq_printf(m, "%s ", hwcap_str[i]);
858
859 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
860 seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
861
862 if ((processor_id & 0x0000f000) == 0x00000000) {
863 /* pre-ARM7 */
864 seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
865 } else {
866 if ((processor_id & 0x0000f000) == 0x00007000) {
867 /* ARM7 */
868 seq_printf(m, "CPU variant\t: 0x%02x\n",
869 (processor_id >> 16) & 127);
870 } else {
871 /* post-ARM7 */
872 seq_printf(m, "CPU variant\t: 0x%x\n",
873 (processor_id >> 20) & 15);
874 }
875 seq_printf(m, "CPU part\t: 0x%03x\n",
876 (processor_id >> 4) & 0xfff);
877 }
878 seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
879
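/*
 * Worked example for the post-ARM7 branch above, using an illustrative
 * ID of 0x4115b360: implementer 0x41, variant 0x1, part 0xb36 and
 * revision 0.
 */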
880 {
881 unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
882 if (cache_info != processor_id) {
883 seq_printf(m, "Cache type\t: %s\n"
884 "Cache clean\t: %s\n"
885 "Cache lockdown\t: %s\n"
886 "Cache format\t: %s\n",
887 cache_types[CACHE_TYPE(cache_info)],
888 cache_clean[CACHE_TYPE(cache_info)],
889 cache_lockdown[CACHE_TYPE(cache_info)],
890 CACHE_S(cache_info) ? "Harvard" : "Unified");
891
892 if (CACHE_S(cache_info)) {
893 c_show_cache(m, "I", CACHE_ISIZE(cache_info));
894 c_show_cache(m, "D", CACHE_DSIZE(cache_info));
895 } else {
896 c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
897 }
898 }
899 }
900
901 seq_puts(m, "\n");
902
903 seq_printf(m, "Hardware\t: %s\n", machine_name);
904 seq_printf(m, "Revision\t: %04x\n", system_rev);
905 seq_printf(m, "Serial\t\t: %08x%08x\n",
906 system_serial_high, system_serial_low);
907
908 return 0;
909}
910
911static void *c_start(struct seq_file *m, loff_t *pos)
912{
913 return *pos < 1 ? (void *)1 : NULL;
914}
915
916static void *c_next(struct seq_file *m, void *v, loff_t *pos)
917{
918 ++*pos;
919 return NULL;
920}
921
922static void c_stop(struct seq_file *m, void *v)
923{
924}
925
926struct seq_operations cpuinfo_op = {
927 .start = c_start,
928 .next = c_next,
929 .stop = c_stop,
930 .show = c_show
931};
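/*
 * cpuinfo_op is the seq_file backend fs/proc uses for /proc/cpuinfo; the
 * c_start()/c_next() pair deliberately yields a single record, so
 * c_show() runs exactly once per read and prints every CPU itself.
 */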