Merge tag 'v3.10.69' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm64 / kernel / setup.c
1 /*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <linux/export.h>
21 #include <linux/kernel.h>
22 #include <linux/stddef.h>
23 #include <linux/ioport.h>
24 #include <linux/delay.h>
25 #include <linux/utsname.h>
26 #include <linux/initrd.h>
27 #include <linux/console.h>
28 #include <linux/bootmem.h>
29 #include <linux/seq_file.h>
30 #include <linux/screen_info.h>
31 #include <linux/init.h>
32 #include <linux/kexec.h>
33 #include <linux/crash_dump.h>
34 #include <linux/root_dev.h>
35 #include <linux/clk-provider.h>
36 #include <linux/cpu.h>
37 #include <linux/interrupt.h>
38 #include <linux/smp.h>
39 #include <linux/fs.h>
40 #include <linux/proc_fs.h>
41 #include <linux/memblock.h>
42 #include <linux/of_fdt.h>
43 #include <linux/of_platform.h>
44 #include <linux/personality.h>
45
46 #include <asm/cputype.h>
47 #include <asm/elf.h>
48 #include <asm/cputable.h>
49 #include <asm/cpu_ops.h>
50 #include <asm/sections.h>
51 #include <asm/setup.h>
52 #include <asm/smp_plat.h>
53 #include <asm/cacheflush.h>
54 #include <asm/tlbflush.h>
55 #include <asm/traps.h>
56 #include <asm/memblock.h>
57 #include <asm/psci.h>
58
/* MIDR_EL1 value of the boot CPU, cached by the early CPU detection code. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/*
 * AArch64 ELF hwcap bitmap advertised to userspace (AT_HWCAP);
 * populated by setup_processor() from ID_AA64ISAR0_EL1.
 */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline hwcaps that every AArch32-capable ARMv8 CPU provides;
 * reported to 32-bit compat tasks via AT_HWCAP/AT_HWCAP2.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Human-readable names set by setup_processor()/setup_machine_fdt(). */
static const char *cpu_name;
static const char *machine_name;
/* Physical address of the DTB; consumed by setup_arch() -> setup_machine_fdt(). */
phys_addr_t __fdt_pointer __initdata;
79
/*
 * Standard memory resources
 *
 * Placeholders for the kernel image's code/data spans in /proc/iomem;
 * start/end are filled in by request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases for the two entries above. */
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
100
101 void __init early_print(const char *str, ...)
102 {
103 char buf[256];
104 va_list ap;
105
106 va_start(ap, str);
107 vsnprintf(buf, sizeof(buf), str, ap);
108 va_end(ap);
109
110 printk("%s", buf);
111 }
112
/*
 * Per-CPU identification state: reg_midr is filled in by
 * cpuinfo_store_cpu() and reported through /proc/cpuinfo (c_show()).
 */
struct cpuinfo_arm64 {
	struct cpu	cpu;		/* embedded device-model CPU object (topology_init) */
	u32		reg_midr;	/* cached MIDR_EL1 value */
};

static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
119
120 void cpuinfo_store_cpu(void)
121 {
122 struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
123 info->reg_midr = read_cpuid_id();
124 }
125
126 static void __init setup_processor(void)
127 {
128 /*
129 * clear __my_cpu_offset on boot CPU to avoid hang caused by
130 * using percpu variable early, for example, lockdep will
131 * access percpu variable inside lock_release
132 */
133 set_my_cpu_offset(0);
134 }
135
136 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
137 {
138 return phys_id == cpu_logical_map(cpu);
139 }
140
/*
 * Shift/mask data for hashing an MPIDR_EL1 value into a compact linear
 * index. Consumers are outside this file (presumably low-level boot/
 * suspend paths, given the cache flush below - confirm against callers).
 */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		/* bits[i] == 0 when this level never varies (affinity == 0). */
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/* Push the hash data out of the D-cache for non-cacheable readers. */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
209
/*
 * Identify the boot CPU, set the utsname machine string, and derive the
 * userspace-visible hwcaps from the ID_AA64ISAR0_EL1 feature register.
 */
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);	/* unrecoverable this early: spin forever */
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	/* AES field, bits [7:4]: 1 = AES, 2 = AES + PMULL. */
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {	/* top bit set would mean reserved/negative */
		switch (block) {
		default:	/* unknown non-negative values imply at least PMULL */
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough: PMULL implies AES */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	/* SHA1 field, bits [11:8]. */
	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	/* SHA2 field, bits [15:12]. */
	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	/* CRC32 field, bits [19:16]. */
	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;
}
261
/*
 * Validate and parse the flattened device tree at physical address
 * @dt_phys: sets machine_name, fills boot_command_line from /chosen and
 * registers memory nodes with memblock. Hangs the boot CPU with a
 * diagnostic if the blob is missing or has a bad magic.
 */
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	struct boot_param_header *devtree;
	unsigned long dt_root;

	/* Record this CPU's MIDR_EL1 in its per-cpu cpu_data. */
	cpuinfo_store_cpu();

	/* Check we have a non-NULL DT pointer */
	if (!dt_phys) {
		early_print("\n"
			"Error: NULL or invalid device tree blob\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n");

		/* Nothing sane to do without a DT: park the boot CPU. */
		while (true)
			cpu_relax();

	}

	devtree = phys_to_virt(dt_phys);

	/* Check device tree validity */
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
		/*
		 * NOTE(review): dt_phys is a phys_addr_t printed with %p
		 * (which expects a pointer) - both are 64-bit on arm64 so
		 * this works, but it is strictly a format mismatch.
		 */
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"Expected 0x%x, found 0x%x\n"
			"\nPlease check your bootloader.\n",
			dt_phys, devtree, OF_DT_HEADER,
			be32_to_cpu(devtree->magic));

		while (true)
			cpu_relax();
	}

	initial_boot_params = devtree;
	dt_root = of_get_flat_dt_root();

	/* Prefer the root node's "model", fall back to "compatible". */
	machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!machine_name)
		machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	if (!machine_name)
		machine_name = "<unknown>";
	pr_info("Machine: %s\n", machine_name);

	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
313
314 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
315 {
316 base &= PAGE_MASK;
317 size &= PAGE_MASK;
318 if (base + size < PHYS_OFFSET) {
319 pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
320 base, base + size);
321 return;
322 }
323 if (base < PHYS_OFFSET) {
324 pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
325 base, PHYS_OFFSET);
326 size -= PHYS_OFFSET - base;
327 base = PHYS_OFFSET;
328 }
329 memblock_add(base, size);
330 }
331
/*
 * Early FDT allocator hook: carve @size bytes (aligned to @align) out of
 * memblock and return the linear-map virtual address.
 */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}
336
/*
 * Limit the memory size that was specified via FDT.
 * Handler for the "mem=<size>" early command-line parameter.
 */
static int __init early_mem(char *p)
{
	phys_addr_t limit;

	/* "mem" with no value: report failure to the early_param core. */
	if (!p)
		return 1;

	/* Parse the human-readable size and round down to a whole page. */
	limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", limit >> 20);

	/* Drop memblock memory above the requested limit. */
	memblock_enforce_memory_limit(limit);

	return 0;
}
early_param("mem", early_mem);
355
/*
 * Populate /proc/iomem: register a "System RAM" resource for every
 * memblock memory region and nest the kernel code/data resources inside
 * the region that contains them.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel image sections (inclusive ends). */
	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		/* Boot-time allocation; these resources live forever. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel spans under the region holding them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
383
/* Logical CPU -> MPIDR hardware id map; INVALID_HWID until discovered. */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
385
/*
 * Architecture-level boot setup, called once from start_kernel().
 * Sequences CPU identification, DT parsing, memblock/paging init and
 * boot-CPU SMP data; hands the kernel command line back via *cmdline_p.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	/* Parse the DTB whose physical address is in __fdt_pointer. */
	setup_machine_fdt(__fdt_pointer);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/* Process early command-line options (e.g. "mem=" above). */
	parse_early_param();

	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	unflatten_device_tree();

	psci_init();

	/* Record the boot CPU's hardware id and resolve its cpu_ops. */
	cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

	/* Pick a default console switch driver for the VT layer. */
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

}
426
/*
 * Late arch initcall: register DT-described clocks, then populate
 * platform devices from the device tree root.
 */
static int __init arm64_device_init(void)
{
	of_clk_init(NULL);
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);
434
435 static int __init topology_init(void)
436 {
437 int i;
438
439 for_each_possible_cpu(i) {
440 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
441 cpu->hotpluggable = 1;
442 register_cpu(cpu, i);
443 }
444
445 return 0;
446 }
447 subsys_initcall(topology_init);
448
/*
 * AArch64 hwcap names for /proc/cpuinfo. Index j corresponds to bit j
 * of elf_hwcap as tested in c_show(); the trailing NULL terminates the
 * print loop, so keep it when extending the table.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
460
461 #ifdef CONFIG_COMPAT
/*
 * AArch32 compat hwcap names for /proc/cpuinfo; index j corresponds to
 * bit j of COMPAT_ELF_HWCAP as tested in c_show().
 *
 * Fix: the array previously had no NULL terminator, so c_show()'s
 * "for (j = 0; compat_hwcap_str[j]; j++)" loop for PER_LINUX32 tasks
 * read past the end of the array (undefined behaviour). Add the
 * sentinel, matching hwcap_str above and upstream.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};
486 #endif /* CONFIG_COMPAT */
487
/*
 * /proc/cpuinfo show routine: emits one glibc-compatible stanza per
 * online CPU (processor number, BogoMIPS, Features, MIDR fields).
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		/* 32-bit personality tasks see the AArch32 hwcap names. */
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/*
			 * NOTE(review): this loop relies on compat_hwcap_str
			 * being NULL-terminated - verify the array carries a
			 * NULL sentinel.
			 */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (COMPAT_ELF_HWCAP & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		/* Decode MIDR_EL1: implementer/variant/part/revision. */
		seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
		seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
		seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
	}

	return 0;
}
537
538 static void *c_start(struct seq_file *m, loff_t *pos)
539 {
540 return *pos < 1 ? (void *)1 : NULL;
541 }
542
543 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
544 {
545 ++*pos;
546 return NULL;
547 }
548
/* seq_file stop: nothing to release - c_start() allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
552
/*
 * seq_file operations backing /proc/cpuinfo; c_start()/c_next() yield
 * exactly one iteration and c_show() prints all online CPUs in it.
 */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};