/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>
#include <asm/unified.h>
#include <asm/cputype.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
struct processor processor __ro_after_init;
struct cpu_tlb_fns cpu_tlb __ro_after_init;
struct cpu_user_fns cpu_user __ro_after_init;
struct cpu_cache_fns cpu_cache __ro_after_init;
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
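/*
 * Illustrative note (not in the original source): the union above stores
 * the bytes 'l', '?', '?', 'b' and ENDIANNESS reads back the least
 * significant byte of the unsigned long. On a little-endian CPU that byte
 * is c[0] = 'l'; on a big-endian CPU it is c[3] = 'b'. The character is
 * appended below to build strings such as "armv7l" or "armv7b" for the
 * utsname machine field and the ELF platform string.
 */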
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown", "3", "4", "4T", "5", "5T", "5TE", "5TEJ",
	"6TEJ", "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)",
	"?(16)", "?(17)",
};
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
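/*
 * Worked example (illustrative, not in the original source): a Cortex-A9
 * reports MIDR 0x410fc090. Bits [19:16] are 0xf, so the revised CPUID
 * scheme applies and MMFR0 is consulted; its VMSA field (bits [3:0]) is
 * >= 3 on such parts, so __get_cpu_architecture() returns CPU_ARCH_ARMv7.
 */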
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
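/*
 * Worked example (illustrative): for a hypothetical CCSIDR with a
 * line-size field of 1 and a sets field of 127, line_size = 4 << (1 + 2)
 * = 32 bytes and num_sets = 128, so one cache way spans 32 * 128 = 4096
 * bytes. With 4 KiB pages that exactly equals PAGE_SIZE, so the I-cache
 * would not alias; one more set bit in either field tips it into the
 * aliasing case.
 */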
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif
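/*
 * Sketch of the effect (illustrative): after patching, the first eight
 * bytes of __aeabi_uidiv are overwritten so the library call degenerates
 * to the hardware instruction plus a return, roughly:
 *
 *	__aeabi_uidiv:
 *		udiv	r0, r0, r1
 *		bx	lr
 *
 * which is why exactly two 32-bit slots are written and eight bytes of
 * I-cache are flushed per routine above.
 */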
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
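/*
 * Worked example (illustrative): cpuid_feature_extract(CPUID_EXT_ISAR0, 24)
 * pulls the 4-bit Divide field from ID_ISAR0[27:24]. A value of 2 means
 * sdiv/udiv exist in both ARM and Thumb encodings (IDIVA and IDIVT), a
 * value of 1 means Thumb only (IDIVT), matching the two tests above.
 */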
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
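/*
 * Worked example (illustrative): if the boot CPU reports MPIDR affinity
 * level 0 as 2, the map becomes logical 0 -> physical 2 and logical 2 ->
 * physical 0, with every other logical CPU mapping to itself, so the
 * booting core is always logical CPU 0.
 */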
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
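/*
 * Worked example (illustrative): for four CPUs with MPIDRs 0x000, 0x001,
 * 0x100 and 0x101, the XOR mask is 0x101, so affinity level 0 contributes
 * one bit at position 0 and level 1 one bit at position 8. The shifts
 * computed above (0 and 7) pack those two bits side by side, hashing the
 * four MPIDRs to the dense indices 0..3.
 */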
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	processor = *list->proc;
	cpu_tlb = *list->tlb;
	cpu_user = *list->user;
	cpu_cache = *list->cache;

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}
void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}
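/*
 * Worked example (illustrative): a bank passed as start=0x80000800,
 * size=0x100000 is trimmed to aligned_start=0x80001000, size is reduced
 * by the 0x800 skipped bytes and then masked down to whole pages
 * (0xff000), so only fully usable pages reach memblock_add().
 */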
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
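/*
 * Usage sketch (illustrative): booting with "mem=64M@0x80000000" on the
 * command line removes the firmware-reported banks on first use and
 * registers a single 64 MiB bank at 0x80000000 via arm_add_memory().
 */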
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end   = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);
#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command line parameter. The memory reserved is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
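/*
 * Usage sketch (illustrative): "crashkernel=64M" lets the kernel pick a
 * 128 MiB aligned base itself via the memblock search above, while
 * "crashkernel=64M@0x90000000" requests the fixed base 0x90000000 and
 * fails with "memory is in use" if that range is unavailable.
 */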
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else {
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
	}
#endif
}
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
static const char *hwcap_str[] = {
	"swp", "half", "thumb", "26bit", "fastmult", "fpa", "vfp", "edsp",
	"java", "iwmmxt", "crunch", "thumbee", "neon", "vfpv3", "vfpv3d16",
	"tls", "vfpv4", "idiva", "idivt", "vfpd32", "lpae", "evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes", "pmull", "sha1", "sha2", "crc32",
	NULL
};
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}
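/*
 * Note on the BogoMIPS arithmetic above (illustrative): loops_per_jiffy
 * counts delay-loop iterations per timer tick, so lpj / (500000 / HZ)
 * yields the integer part and (lpj / (5000 / HZ)) % 100 the two decimal
 * places; e.g. with HZ=100 and lpj=2490368 this prints
 * "BogoMIPS	: 498.07".
 */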
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};