[ARM] 3365/1: [cleanup] header for compat.c exported functions
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / kernel / setup.c
1 /*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/config.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/stddef.h>
14 #include <linux/ioport.h>
15 #include <linux/delay.h>
16 #include <linux/utsname.h>
17 #include <linux/initrd.h>
18 #include <linux/console.h>
19 #include <linux/bootmem.h>
20 #include <linux/seq_file.h>
21 #include <linux/tty.h>
22 #include <linux/init.h>
23 #include <linux/root_dev.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27
28 #include <asm/cpu.h>
29 #include <asm/elf.h>
30 #include <asm/procinfo.h>
31 #include <asm/setup.h>
32 #include <asm/mach-types.h>
33 #include <asm/cacheflush.h>
34 #include <asm/tlbflush.h>
35
36 #include <asm/mach/arch.h>
37 #include <asm/mach/irq.h>
38 #include <asm/mach/time.h>
39
40 #include "compat.h"
41
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

/*
 * "fpe=" command line handler: remember the requested floating point
 * emulator name for the FP emulation code.  Exactly 8 bytes are
 * copied verbatim, so the buffer is not guaranteed NUL terminated.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
57
/* Symbols defined elsewhere in the ARM core / linker script. */
extern unsigned int mem_fclk_21285;	/* presumably the 21285 memory clock — set elsewhere; TODO confirm */
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
/* Linker-script section boundary markers (addresses only, no storage). */
extern void _stext, _text, _etext, __data_start, _edata, _end;

/* CPU main ID register value (presumably set by early assembly boot code). */
unsigned int processor_id;
/* Machine number handed over by the bootloader. */
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

/* Board revision, reported by c_show() in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* Board serial number halves, reported by c_show() in /proc/cpuinfo. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware capability bits, copied from the proc_info entry. */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);
79
80
/*
 * For multi-processor/TLB/user/cache kernels, the matched proc_info
 * entry's operation vectors are copied into these structures by
 * setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

/*
 * Small per-CPU stacks for the re-entrant exception modes
 * (IRQ, abort, undefined); wired up in cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string (elf_name + endianness suffix, see setup_processor()). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

/* Initial ramdisk location/size, from "initrd=" or the ATAG parsers below. */
unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

/* Memory bank layout handed to paging_init(); filled from tags/cmdline. */
static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/*
 * Run-time endianness probe: the low-order byte of the long is
 * c[0] ('l') on a little-endian build and c[3] ('b') on big-endian.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
118
119 /*
120 * Standard memory resources
121 */
122 static struct resource mem_res[] = {
123 { "Video RAM", 0, 0, IORESOURCE_MEM },
124 { "Kernel text", 0, 0, IORESOURCE_MEM },
125 { "Kernel data", 0, 0, IORESOURCE_MEM }
126 };
127
128 #define video_ram mem_res[0]
129 #define kernel_code mem_res[1]
130 #define kernel_data mem_res[2]
131
132 static struct resource io_res[] = {
133 { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
134 { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
135 { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
136 };
137
138 #define lp0 io_res[0]
139 #define lp1 io_res[1]
140 #define lp2 io_res[2]
141
/*
 * Decode tables for the cache type register, indexed by the 4-bit
 * value extracted by CACHE_TYPE().  "undefined N" entries are
 * reserved encodings.
 */
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

/* Cache clean method, same index as cache_types[]. */
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

/* Cache lockdown format, same index as cache_types[]. */
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

/* Architecture names indexed by cpu_architecture()'s CPU_ARCH_* value. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/* Field extractors for the cache type register value. */
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))	/* separate ("Harvard") I/D caches */
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

/* Sub-fields of the 12-bit per-cache size descriptor. */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))	/* size-multiplier bit */
#define CACHE_LINE(y)	((y) & 3)
228
/*
 * Decode one 12-bit cache size descriptor and print its geometry
 * to the kernel log.
 */
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = CACHE_M(cache) ? 3 : 2;
	unsigned int bytes = mult << (8 + CACHE_SIZE(cache));
	unsigned int assoc = (mult << CACHE_ASSOC(cache)) >> 1;
	unsigned int linelen = 8 << CACHE_LINE(cache);
	unsigned int sets = 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
				  CACHE_LINE(cache));

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix, bytes, assoc, linelen, sets);
}
241
242 static void __init dump_cpu_info(int cpu)
243 {
244 unsigned int info = read_cpuid(CPUID_CACHETYPE);
245
246 if (info != processor_id) {
247 printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
248 cache_types[CACHE_TYPE(info)]);
249 if (CACHE_S(info)) {
250 dump_cache("I cache", cpu, CACHE_ISIZE(info));
251 dump_cache("D cache", cpu, CACHE_DSIZE(info));
252 } else {
253 dump_cache("cache", cpu, CACHE_ISIZE(info));
254 }
255 }
256 }
257
258 int cpu_architecture(void)
259 {
260 int cpu_arch;
261
262 if ((processor_id & 0x0008f000) == 0) {
263 cpu_arch = CPU_ARCH_UNKNOWN;
264 } else if ((processor_id & 0x0008f000) == 0x00007000) {
265 cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
266 } else if ((processor_id & 0x00080000) == 0x00000000) {
267 cpu_arch = (processor_id >> 16) & 7;
268 if (cpu_arch)
269 cpu_arch += CPU_ARCH_ARMv3;
270 } else {
271 /* the revised CPUID */
272 cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
273 }
274
275 return cpu_arch;
276 }
277
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(void);
extern struct machine_desc *lookup_machine_type(unsigned int);

/*
 * Match the running CPU against the kernel's processor support
 * table, hook up the MULTI_* operation vectors, and record the CPU
 * name, utsname machine string, ELF platform string and hwcaps.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type();
	if (!list) {
		/* unsupported CPU: nothing sane to do but spin */
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

	/* copy the matched entry's operations into the run-time vectors */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	/* both strings get an 'l'/'b' endianness suffix (see ENDIANNESS) */
	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}
326
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/* only chatter about caches during the initial boot */
	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into each exception mode in turn (IRQ, abort,
	 * undefined) with IRQ/FIQ masked, point that mode's SP at the
	 * corresponding slice of this CPU's struct stack, then return
	 * to SVC mode.  The "I" constraints require the mode words
	 * and structure offsets to be assemble-time immediates.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
368
369 static struct machine_desc * __init setup_machine(unsigned int nr)
370 {
371 struct machine_desc *list;
372
373 /*
374 * locate machine in the list of supported machines.
375 */
376 list = lookup_machine_type(nr);
377 if (!list) {
378 printk("Machine configuration botched (nr %d), unable "
379 "to continue.\n", nr);
380 while (1);
381 }
382
383 printk("Machine: %s\n", list->name);
384
385 return list;
386 }
387
388 static void __init early_initrd(char **p)
389 {
390 unsigned long start, size;
391
392 start = memparse(*p, p);
393 if (**p == ',') {
394 size = memparse((*p) + 1, p);
395
396 phys_initrd_start = start;
397 phys_initrd_size = size;
398 }
399 }
400 __early_param("initrd=", early_initrd);
401
402 static void __init add_memory(unsigned long start, unsigned long size)
403 {
404 /*
405 * Ensure that start/size are aligned to a page boundary.
406 * Size is appropriately rounded down, start is rounded up.
407 */
408 size -= start & ~PAGE_MASK;
409
410 meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
411 meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK;
412 meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
413 meminfo.nr_banks += 1;
414 }
415
416 /*
417 * Pick out the memory size. We look for mem=size@start,
418 * where start and size are "size[KkMm]"
419 */
420 static void __init early_mem(char **p)
421 {
422 static int usermem __initdata = 0;
423 unsigned long size, start;
424
425 /*
426 * If the user specifies memory size, we
427 * blow away any automatically generated
428 * size.
429 */
430 if (usermem == 0) {
431 usermem = 1;
432 meminfo.nr_banks = 0;
433 }
434
435 start = PHYS_OFFSET;
436 size = memparse(*p, p);
437 if (**p == '@')
438 start = memparse(*p + 1, p);
439
440 add_memory(start, size);
441 }
442 __early_param("mem=", early_mem);
443
/*
 * Initial parsing of the command line.
 *
 * Walks "from", dispatching any argument registered via
 * __early_param() to its handler (which may consume the value by
 * advancing the pointer), and copies everything else into the
 * static command_line buffer; *cmdline_p is left pointing at it.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		/* an early parameter can only start right after a space */
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				/* NOTE: intentionally shadows the outer 'len' */
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					/* drop the space already copied before the match */
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					/* skip any remainder of the handled argument */
					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
482
483 static void __init
484 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
485 {
486 #ifdef CONFIG_BLK_DEV_RAM
487 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
488
489 rd_image_start = image_start;
490 rd_prompt = prompt;
491 rd_doload = doload;
492
493 if (rd_sz)
494 rd_size = rd_sz;
495 #endif
496 }
497
/*
 * Register the kernel text/data regions, every RAM bank, the
 * optional video RAM window and the lp* I/O ranges with the
 * resource tree so drivers cannot claim them.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start = virt_to_phys(&_text);
	kernel_code.end = virt_to_phys(&_etext - 1);
	kernel_data.start = virt_to_phys(&__data_start);
	kernel_data.end = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		/* skip banks that were emptied (e.g. by squash_mem_tags) */
		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end = virt_start + mi->bank[i].size - 1;

		/* bootmem is the only allocator available this early */
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image resources inside their RAM bank */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
551
552 /*
553 * Tag parsing.
554 *
555 * This is the new way of passing data to the kernel at boot time. Rather
556 * than passing a fixed inflexible structure to the kernel, we pass a list
557 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
558 * tag for the list to be recognised (to distinguish the tagged list from
559 * a param_struct). The list is terminated with a zero-length tag (this tag
560 * is not parsed in any way).
561 */
562 static int __init parse_tag_core(const struct tag *tag)
563 {
564 if (tag->hdr.size > 2) {
565 if ((tag->u.core.flags & 1) == 0)
566 root_mountflags &= ~MS_RDONLY;
567 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
568 }
569 return 0;
570 }
571
572 __tagtable(ATAG_CORE, parse_tag_core);
573
574 static int __init parse_tag_mem32(const struct tag *tag)
575 {
576 if (meminfo.nr_banks >= NR_BANKS) {
577 printk(KERN_WARNING
578 "Ignoring memory bank 0x%08x size %dKB\n",
579 tag->u.mem.start, tag->u.mem.size / 1024);
580 return -EINVAL;
581 }
582 add_memory(tag->u.mem.start, tag->u.mem.size);
583 return 0;
584 }
585
586 __tagtable(ATAG_MEM, parse_tag_mem32);
587
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Console geometry defaults, overridable by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: straight field-for-field copy into screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
614
/*
 * ATAG_RAMDISK: configure the RAM disk.  As consumed here, a set
 * bit 0 suppresses loading and a set bit 1 suppresses prompting —
 * both bits are passed inverted into setup_ramdisk().
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
624
625 static int __init parse_tag_initrd(const struct tag *tag)
626 {
627 printk(KERN_WARNING "ATAG_INITRD is deprecated; "
628 "please update your bootloader.\n");
629 phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
630 phys_initrd_size = tag->u.initrd.size;
631 return 0;
632 }
633
634 __tagtable(ATAG_INITRD, parse_tag_initrd);
635
/* ATAG_INITRD2 carries a physical address directly — no conversion
 * needed, unlike the deprecated ATAG_INITRD. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/* ATAG_SERIAL: board serial number, shown by c_show() ("Serial"). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision, shown by c_show() ("Revision"). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

/* ATAG_CMDLINE: bootloader command line replaces the built-in default. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
669
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 *
 * Returns non-zero if a handler was found (the loop broke out
 * before reaching the end of the table), zero otherwise.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
688
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 *
 * The walk ends at the zero-sized terminator tag; any tag without a
 * registered handler is reported and skipped.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
701
/*
 * This holds our defaults: a minimal built-in tag list used when the
 * bootloader supplies nothing usable — one ATAG_CORE (flags = 1, so
 * the root stays read-only per parse_tag_core(), presumably
 * pagesize/rootdev next; confirm against struct tag_core), one
 * ATAG_MEM describing MEM_SIZE bytes at PHYS_OFFSET, and the
 * ATAG_NONE terminator.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
718
/* Machine-specific device init hook; set from the machine
 * descriptor in setup_arch() and invoked as an arch_initcall. */
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
729
/*
 * ARM architecture-specific boot setup, called from start_kernel().
 *
 * Order matters: the CPU and machine are identified first, the
 * machine fixup hook runs before the tags are parsed, and paging
 * must be initialised before bootmem-backed resources are requested.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* prefer the bootloader's tag list over the built-in default */
	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* still not a tag list after conversion: fall back to defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* a fixup that populated meminfo supersedes ATAG_MEM entries */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	/* keep a pristine copy before parse_cmdline() strips early params */
	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
796
797
/*
 * Register a CPU device for every CPU with the driver core
 * (presumably surfacing them under /sys/devices/system/cpu —
 * see register_cpu()).
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);
809
/*
 * Names for the /proc/cpuinfo "Features" line: index i corresponds
 * to elf_hwcap bit (1 << i) — see the loop in c_show().  Keep in
 * sync with the HWCAP_* bit definitions.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL		/* sentinel terminating the c_show() loop */
};
822
/*
 * Emit the geometry of one cache (decoded from its 12-bit size
 * descriptor) into the /proc/cpuinfo seq_file.
 */
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = CACHE_M(cache) ? 3 : 2;
	unsigned int bytes = mult << (8 + CACHE_SIZE(cache));
	unsigned int assoc = (mult << CACHE_ASSOC(cache)) >> 1;
	unsigned int linelen = 8 << CACHE_LINE(cache);
	unsigned int sets = 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
				  CACHE_LINE(cache));

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, bytes,
		type, assoc,
		type, linelen,
		type, sets);
}
838
/*
 * /proc/cpuinfo show routine: processor identification, per-CPU
 * BogoMIPS, the hwcap feature list, decoded main ID register
 * fields, cache type register details, and the board
 * hardware/revision/serial lines.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* decode the ID register per its era-specific layout */
	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		/* a cache type reading equal to the main ID means no info */
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				/* Harvard: separate I and D descriptors */
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
922
923 static void *c_start(struct seq_file *m, loff_t *pos)
924 {
925 return *pos < 1 ? (void *)1 : NULL;
926 }
927
928 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
929 {
930 ++*pos;
931 return NULL;
932 }
933
934 static void c_stop(struct seq_file *m, void *v)
935 {
936 }
937
/* seq_file operations backing the /proc/cpuinfo implementation. */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};