/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern unsigned int mem_fclk_21285;
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void convert_to_tag_list(struct tag *tags);
extern void squash_mem_tags(struct tag *tag);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
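/*
 * How the ENDIANNESS probe works (illustrative walk-through):
 * endian_test.c[] places 'l' at the lowest address and 'b' at the
 * highest.  Casting endian_test.l to char yields its least significant
 * byte, which is the byte at the lowest address on a little-endian CPU
 * ('l') and the byte at the highest address on a big-endian CPU ('b').
 * setup_processor() appends this character to the architecture and ELF
 * platform names, so a little-endian ARMv5TEJ part would report, for
 * example, "armv5tejl" (the exact name shown here is illustrative).
 */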

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ "Video RAM", 0, 0, IORESOURCE_MEM },
	{ "Kernel text", 0, 0, IORESOURCE_MEM },
	{ "Kernel data", 0, 0, IORESOURCE_MEM }
};

#define video_ram	mem_res[0]
#define kernel_code	mem_res[1]
#define kernel_data	mem_res[2]

static struct resource io_res[] = {
	{ "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0	io_res[0]
#define lp1	io_res[1]
#define lp2	io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"?(10)",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
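
/*
 * Worked example of the decode done by dump_cache() and c_show_cache()
 * below (the field values are hypothetical; the arithmetic is exactly
 * what the code computes).  For a size field with SIZE=5, ASSOC=2, M=0
 * and LINE=2, mult is 2, so:
 *
 *	cache size    = 2 << (8 + 5)           = 16384 bytes
 *	associativity = (2 << 2) >> 1          = 4 ways
 *	line length   = 8 << 2                 = 32 bytes
 *	sets          = 1 << (6 + 5 - 2 - 2)   = 128
 *
 * and indeed 128 sets * 4 ways * 32 bytes = 16384 bytes.
 */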

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}
}

int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0000f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0000f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	}

	return cpu_arch;
}
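
/*
 * Example decode (the ID value is hypothetical): for a processor_id of
 * 0x41069260, bits 15:12 are 0x9, so neither the "ID is zero" nor the
 * ARM7-family (0x7) case applies; bits 18:16 give
 * (0x41069260 >> 16) & 7 = 6, so the function returns
 * CPU_ARCH_ARMv3 + 6, which selects the "5TEJ" entry of proc_arch[]
 * above.
 */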

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(void);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * Locate the processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S.
	 */
	list = lookup_processor_type();
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);
	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void __init cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * The asm below switches in turn to IRQ, ABT and UND mode (IRQs
	 * and FIQs masked), points each mode's banked sp at the matching
	 * slot of this CPU's struct stack, then returns to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * Locate the machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
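
/*
 * Usage sketch (the address is illustrative only): a command line
 * containing "initrd=0x00800000,8M" makes memparse() return 0x00800000
 * for the start; because the next character is ',', "8M" is then
 * parsed as the size, and both values are recorded in
 * phys_initrd_start/phys_initrd_size for later initrd setup.
 */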

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	meminfo.bank[meminfo.nr_banks].start = start;
	meminfo.bank[meminfo.nr_banks].size = size;
	meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}
__early_param("mem=", early_mem);
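
/*
 * Usage sketch (values are illustrative only): "mem=64M" registers a
 * 64MB bank starting at PHYS_OFFSET, while "mem=64M@0x20000000"
 * registers a 64MB bank starting at physical address 0x20000000.  Each
 * mem= option appends one entry to meminfo.bank[]; the first one also
 * discards any banks already set up from the boot tags or the machine
 * fixup, as the comment above describes.
 */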

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start = virt_to_phys(&_text);
	kernel_code.end = virt_to_phys(&_etext - 1);
	kernel_data.start = virt_to_phys(&__data_start);
	kernel_data.end = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2, so only
	 * reserve these legacy I/O port ranges when the machine
	 * description asks for them.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed, inflexible structure to the kernel, we pass a list
 * of variable-sized tags.  The first tag must be an ATAG_CORE tag for the
 * list to be recognised (this distinguishes the tagged list from a legacy
 * param_struct).  The list is terminated with a zero-length tag, which is
 * not parsed in any way.
 */
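
/*
 * A minimal tag list, as a bootloader might construct it (layout sketch
 * only):
 *
 *	ATAG_CORE  (flags, pagesize, rootdev)
 *	ATAG_MEM   (size, start)	-- one per memory bank
 *	ATAG_NONE  (zero-length terminator)
 *
 * The init_tags structure further down provides exactly this shape as
 * the built-in fallback when no usable list is passed in.
 */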
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
		       tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
	meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size;
	meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start);
	meminfo.nr_banks += 1;

	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x = tag->u.videotext.x;
	screen_info.orig_y = tag->u.videotext.y;
	screen_info.orig_video_page = tag->u.videotext.video_page;
	screen_info.orig_video_mode = tag->u.videotext.video_mode;
	screen_info.orig_video_cols = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
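
/*
 * In other words: if no usable tag list is passed in, the kernel
 * assumes a single memory bank of MEM_SIZE bytes starting at
 * PHYS_OFFSET (16MB unless the platform overrides MEM_SIZE above),
 * described in exactly the ATAG_CORE/ATAG_MEM/ATAG_NONE layout
 * sketched earlier.
 */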

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->param_offset)
		tags = phys_to_virt(mdesc->param_offset);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


static int __init topology_init(void)
{
	int cpu;

	for_each_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		seq_printf(m, "Processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0000f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0000f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
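
/*
 * Example of the /proc/cpuinfo layout c_show() produces on a UP ARMv5
 * system (all values are illustrative only; the field labels match the
 * seq_printf() calls above):
 *
 *	Processor	: ARM926EJ-S rev 5 (v5l)
 *	BogoMIPS	: 99.12
 *	Features	: swp half thumb fastmult edsp java
 *	CPU implementer	: 0x41
 *	CPU architecture: 5TEJ
 *	CPU variant	: 0x0
 *	CPU part	: 0x926
 *	CPU revision	: 5
 *	...cache lines as printed by c_show_cache()...
 *	Hardware	: <machine_name>
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */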

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
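
/*
 * /proc/cpuinfo is a single "record": c_start() hands back a dummy
 * non-NULL token for position 0 only, and c_next() always returns
 * NULL, so the seq_file core calls c_show() exactly once per read.
 */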

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};