Merge branch 'v6v7' into devel
[cascardo/linux.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/crash_dump.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31
32 #include <asm/unified.h>
33 #include <asm/cpu.h>
34 #include <asm/cputype.h>
35 #include <asm/elf.h>
36 #include <asm/procinfo.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp_plat.h>
40 #include <asm/mach-types.h>
41 #include <asm/cacheflush.h>
42 #include <asm/cachetype.h>
43 #include <asm/tlbflush.h>
44
45 #include <asm/mach/arch.h>
46 #include <asm/mach/irq.h>
47 #include <asm/mach/time.h>
48 #include <asm/traps.h>
49 #include <asm/unwind.h>
50
51 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
52 #include "compat.h"
53 #endif
54 #include "atags.h"
55 #include "tcm.h"
56
57 #ifndef MEM_SIZE
58 #define MEM_SIZE        (16*1024*1024)
59 #endif
60
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator type name recorded from the "fpe=" boot argument */
char fpe_type[8];

/*
 * Record the "fpe=" command-line argument for the FP emulator.
 *
 * NOTE(review): copies a fixed 8 bytes regardless of the argument's
 * length, so a shorter argument pulls in trailing command-line bytes
 * and the stored value is not guaranteed to be NUL-terminated —
 * longstanding behaviour, confirm before changing.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;	/* option consumed */
}

__setup("fpe=", fpe_setup);
#endif
72
/* implemented in arch/arm/mm and arch/arm/kernel respectively */
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* raw CPU ID register value */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* machine number handed over by the bootloader */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* cache policy bits, computed once by cacheid_init() */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* physical address of the ATAG list, if the bootloader passed one */
unsigned int __atags_pointer __initdata;

/* board revision from ATAG_REVISION, shown in /proc/cpuinfo */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* board serial number halves from ATAG_SERIAL, shown in /proc/cpuinfo */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware capability bits, initialised in setup_processor() */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/* per-implementation method tables, copied from the proc_info_list entry */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Small per-CPU stacks for the re-entrant exception modes; three words
 * per mode, installed by cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* runtime endianness probe: ENDIANNESS reads the first byte of the word */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
136
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* aliases; start/end are filled in by request_standard_resources() */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* legacy lp0/lp1/lp2 port ranges, claimed only if the machine asks */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned from cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
209
210 int cpu_architecture(void)
211 {
212         int cpu_arch;
213
214         if ((read_cpuid_id() & 0x0008f000) == 0) {
215                 cpu_arch = CPU_ARCH_UNKNOWN;
216         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
217                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
218         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
219                 cpu_arch = (read_cpuid_id() >> 16) & 7;
220                 if (cpu_arch)
221                         cpu_arch += CPU_ARCH_ARMv3;
222         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
223                 unsigned int mmfr0;
224
225                 /* Revised CPUID format. Read the Memory Model Feature
226                  * Register 0 and check for VMSAv7 or PMSAv7 */
227                 asm("mrc        p15, 0, %0, c0, c1, 4"
228                     : "=r" (mmfr0));
229                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
230                     (mmfr0 & 0x000000f0) >= 0x00000030)
231                         cpu_arch = CPU_ARCH_ARMv7;
232                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
233                          (mmfr0 & 0x000000f0) == 0x00000020)
234                         cpu_arch = CPU_ARCH_ARMv6;
235                 else
236                         cpu_arch = CPU_ARCH_UNKNOWN;
237         } else
238                 cpu_arch = CPU_ARCH_UNKNOWN;
239
240         return cpu_arch;
241 }
242
/*
 * Work out whether the instruction cache can alias, i.e. whether one
 * way of the I-cache spans more than a page.  @arch selects how to
 * interpret the cache geometry registers, which changed between ARMv6
 * and ARMv7.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 instruction cache in CSSELR ... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		/* ... and make the selection visible before reading CCSIDR */
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliasing is possible when sets * line size exceeds a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* bit 11 of the v6 cache type register flags I-cache aliasing */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
271
/*
 * Derive the global 'cacheid' policy bits from the cache type register,
 * so the rest of the kernel knows which flush/alias strategy the caches
 * require.  Called once from setup_processor().
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* I-cache policy field 01 -> ASID-tagged */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6: all caches are treated as VIVT */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
305
306 /*
307  * These functions re-use the assembly code in head.S, which
308  * already provide the required functionality.
309  */
310 extern struct proc_info_list *lookup_processor_type(unsigned int);
311
312 static void __init early_print(const char *str, ...)
313 {
314         extern void printascii(const char *);
315         char buf[256];
316         va_list ap;
317
318         va_start(ap, str);
319         vsnprintf(buf, sizeof(buf), str, ap);
320         va_end(ap);
321
322 #ifdef CONFIG_DEBUG_LL
323         printascii(buf);
324 #endif
325         printk("%s", buf);
326 }
327
/*
 * Find the machine_desc for machine number @type (the value the
 * bootloader passed in r1).  On failure this dumps the supported
 * machine list and spins forever — it never returns NULL.
 */
static struct machine_desc * __init lookup_machine_type(unsigned int type)
{
	/* table assembled by the linker from the per-machine descriptors */
	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
	struct machine_desc *p;

	for (p = __arch_info_begin; p < __arch_info_end; p++)
		if (type == p->nr)
			return p;

	early_print("\n"
		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
		"Available machine support:\n\nID (hex)\tNAME\n", type);

	for (p = __arch_info_begin; p < __arch_info_end; p++)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
349
350 static void __init feat_v6_fixup(void)
351 {
352         int id = read_cpuid_id();
353
354         if ((id & 0xff0f0000) != 0x41070000)
355                 return;
356
357         /*
358          * HWCAP_TLS is available only on 1136 r1p0 and later,
359          * see also kuser_get_tls_init.
360          */
361         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
362                 elf_hwcap &= ~HWCAP_TLS;
363 }
364
/*
 * Identify the CPU, install the per-implementation method tables and
 * announce the processor.  Runs once on the boot CPU, before any other
 * CPU-dependent setup.  Hangs if the CPU is not supported by this build.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

	/* copy out the method tables for multi-implementation kernels */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' according to the runtime probe above */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* kernel built without Thumb support: don't advertise it to userspace */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
412
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks: one small stack each for IRQ,
 * abort and undefined-instruction mode, carved out of this CPU's entry
 * in the 'stacks' array.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undefined mode in turn: switch to
	 * the mode with interrupts masked, point sp into the matching
	 * per-mode stack area, then drop back to SVC mode at the end.
	 * r14 is used as scratch, hence the clobber.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
463
464 static struct machine_desc * __init setup_machine(unsigned int nr)
465 {
466         struct machine_desc *list;
467
468         /*
469          * locate machine in the list of supported machines.
470          */
471         list = lookup_machine_type(nr);
472         if (!list) {
473                 printk("Machine configuration botched (nr %d), unable "
474                        "to continue.\n", nr);
475                 while (1);
476         }
477
478         printk("Machine: %s\n", list->name);
479
480         return list;
481 }
482
/*
 * Register a bank of physical memory [start, start + size) with the
 * meminfo table.  Returns 0 on success, -EINVAL if the table is full
 * or the page-aligned region is empty.
 */
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Reject the bank if the alignment above consumed all of it.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
511
512 /*
513  * Pick out the memory size.  We look for mem=size@start,
514  * where start and size are "size[KkMm]"
515  */
516 static int __init early_mem(char *p)
517 {
518         static int usermem __initdata = 0;
519         unsigned long size, start;
520         char *endp;
521
522         /*
523          * If the user specifies memory size, we
524          * blow away any automatically generated
525          * size.
526          */
527         if (usermem == 0) {
528                 usermem = 1;
529                 meminfo.nr_banks = 0;
530         }
531
532         start = PHYS_OFFSET;
533         size  = memparse(p, &endp);
534         if (*endp == '@')
535                 start = memparse(endp + 1, NULL);
536
537         arm_add_memory(start, size);
538
539         return 0;
540 }
541 early_param("mem", early_mem);
542
543 static void __init
544 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
545 {
546 #ifdef CONFIG_BLK_DEV_RAM
547         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
548
549         rd_image_start = image_start;
550         rd_prompt = prompt;
551         rd_doload = doload;
552
553         if (rd_sz)
554                 rd_size = rd_sz;
555 #endif
556 }
557
/*
 * Claim the standard resource ranges: every memblock memory region as
 * "System RAM" with the kernel text/data nested inside it, plus the
 * optional video RAM region and the legacy lp port ranges the machine
 * descriptor asks for.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest the kernel image resources inside their RAM region */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
602
603 /*
604  *  Tag parsing.
605  *
606  * This is the new way of passing data to the kernel at boot time.  Rather
607  * than passing a fixed inflexible structure to the kernel, we pass a list
608  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
609  * tag for the list to be recognised (to distinguish the tagged list from
610  * a param_struct).  The list is terminated with a zero-length tag (this tag
611  * is not parsed in any way).
612  */
613 static int __init parse_tag_core(const struct tag *tag)
614 {
615         if (tag->hdr.size > 2) {
616                 if ((tag->u.core.flags & 1) == 0)
617                         root_mountflags &= ~MS_RDONLY;
618                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
619         }
620         return 0;
621 }
622
623 __tagtable(ATAG_CORE, parse_tag_core);
624
/* ATAG_MEM: register one physical memory bank with meminfo */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
631
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* default console geometry, overridden by ATAG_VIDEOTEXT when present */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text console state field by field */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
658
659 static int __init parse_tag_ramdisk(const struct tag *tag)
660 {
661         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
662                       (tag->u.ramdisk.flags & 2) == 0,
663                       tag->u.ramdisk.start, tag->u.ramdisk.size);
664         return 0;
665 }
666
667 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
668
/* ATAG_SERIAL: 64-bit board serial number, reported via /proc/cpuinfo */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
677
/* ATAG_REVISION: board revision number, reported via /proc/cpuinfo */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
685
/*
 * ATAG_CMDLINE: adopt the bootloader's command line, unless the kernel
 * was configured to force its built-in CONFIG_CMDLINE instead.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#else
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#endif /* CONFIG_CMDLINE_FORCE */
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
697
698 /*
699  * Scan the tag table for this tag, and call its parse function.
700  * The tag table is built by the linker from all the __tagtable
701  * declarations.
702  */
703 static int __init parse_tag(const struct tag *tag)
704 {
705         extern struct tagtable __tagtable_begin, __tagtable_end;
706         struct tagtable *t;
707
708         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
709                 if (tag->hdr.tag == t->tag) {
710                         t->parse(tag);
711                         break;
712                 }
713
714         return t < &__tagtable_end;
715 }
716
717 /*
718  * Parse all tags in the list, checking both the global and architecture
719  * specific tag tables.
720  */
721 static void __init parse_tags(const struct tag *t)
722 {
723         for (; t->hdr.size; t = tag_next(t))
724                 if (!parse_tag(t))
725                         printk(KERN_WARNING
726                                 "Ignoring unrecognised tag 0x%08x\n",
727                                 t->hdr.tag);
728 }
729
/*
 * This holds our defaults: a minimal tag list used when the bootloader
 * supplies no (valid) ATAGs.  It describes a single MEM_SIZE bank at
 * PHYS_OFFSET; core flags = 1 leaves the root read-only (see
 * parse_tag_core), rootdev = 0xff.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }		/* terminator */
};
746
/*
 * Run the machine's init_machine() hook at arch_initcall time, once the
 * core kernel is far enough along to register platform devices.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
755
756 #ifdef CONFIG_KEXEC
757 static inline unsigned long long get_total_mem(void)
758 {
759         unsigned long total;
760
761         total = max_low_pfn - min_low_pfn;
762         return total << PAGE_SHIFT;
763 }
764
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* non-zero: no usable crashkernel= option on the command line */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* register the reserved range in the iomem resource tree */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
801 #else
802 static inline void reserve_crashkernel(void) {}
803 #endif /* CONFIG_KEXEC */
804
805 /*
806  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
807  * is_kdump_kernel() to determine if we are booting after a panic. Hence
808  * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
809  */
810
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	/* memparse() handles K/M/G suffixes; end == arg means nothing parsed */
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
828
829 static void __init squash_mem_tags(struct tag *tag)
830 {
831         for (; tag->hdr.size; tag = tag_next(tag))
832                 if (tag->hdr.tag == ATAG_MEM)
833                         tag->hdr.tag = ATAG_NONE;
834 }
835
/*
 * Main architecture-specific boot entry point: identify CPU and
 * machine, locate and parse the boot tags, set up the command line,
 * memory layout, paging and per-CPU state.  Ordering here matters.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/*
	 * Prefer the tag list pointer handed over by the bootloader;
	 * fall back to the machine's compiled-in boot_params address,
	 * and otherwise stay with the static init_tags defaults.
	 */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still no valid list: use the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/*
		 * If the fixup hook already populated meminfo, the
		 * memory tags are stale - drop them before parsing.
		 */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	/* must run after memblock/bootmem is usable */
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
922
923
924 static int __init topology_init(void)
925 {
926         int cpu;
927
928         for_each_possible_cpu(cpu) {
929                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
930                 cpuinfo->cpu.hotpluggable = 1;
931                 register_cpu(&cpuinfo->cpu, cpu);
932         }
933
934         return 0;
935 }
936 subsys_initcall(topology_init);
937
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used for CPU-related proc entries */
static int __init proc_cpu_init(void)
{
	if (!proc_mkdir("cpu", NULL))
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
950
/*
 * Names for the hwcap bits shown in /proc/cpuinfo; entry i labels bit
 * (1 << i) of elf_hwcap (see the Features loop in c_show()), so the
 * order must not change.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL		/* sentinel for the c_show() loop */
};
969
/*
 * Produce the whole /proc/cpuinfo output: per-CPU "processor" stanzas
 * (or a single BogoMIPS line on UP builds), the hwcap feature list,
 * the decoded CPU ID register fields, and the board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* the CPU ID layout differs between pre-ARM7, ARM7 and later parts */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1032
1033 static void *c_start(struct seq_file *m, loff_t *pos)
1034 {
1035         return *pos < 1 ? (void *)1 : NULL;
1036 }
1037
1038 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1039 {
1040         ++*pos;
1041         return NULL;
1042 }
1043
/* Nothing to release: c_start() acquires no resources */
static void c_stop(struct seq_file *m, void *v)
{
}
1047
/* seq_file operations backing /proc/cpuinfo */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};