]> git.openfabrics.org - ~shefty/rdma-dev.git/blob - arch/arm/kernel/setup.c
da1d1aa20ad957ccd7021815014d12530de4f3a1
[~shefty/rdma-dev.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27 #include <linux/proc_fs.h>
28 #include <linux/memblock.h>
29 #include <linux/bug.h>
30 #include <linux/compiler.h>
31 #include <linux/sort.h>
32
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/elf.h>
38 #include <asm/procinfo.h>
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 #include <asm/smp_plat.h>
42 #include <asm/mach-types.h>
43 #include <asm/cacheflush.h>
44 #include <asm/cachetype.h>
45 #include <asm/tlbflush.h>
46
47 #include <asm/prom.h>
48 #include <asm/mach/arch.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/time.h>
51 #include <asm/system_info.h>
52 #include <asm/system_misc.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56 #include <asm/virt.h>
57
58 #include "atags.h"
59 #include "tcm.h"
60
61
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/*
 * NUL-terminated name of the FP emulator requested via "fpe=" on the
 * command line; read by the FPE module at initialisation time.
 */
char fpe_type[8];

/*
 * Record the "fpe=" command line argument.
 *
 * The previous memcpy() unconditionally copied 8 bytes, reading past
 * the end of a shorter argument and leaving fpe_type without a NUL
 * terminator when the argument was 8 characters or longer.  strlcpy()
 * bounds the copy by the source string and always terminates the
 * destination.
 */
static int __init fpe_setup(char *line)
{
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
73
/* Implemented elsewhere in arch/arm (mm/, kernel/); called from setup_arch(). */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* Raw MIDR of the boot CPU — set during early boot (head.S path); TODO confirm writer. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine type number handed over by the boot loader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing the cache topology; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGS/DTB passed by the boot loader. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, shown in /proc/cpuinfo by c_show(). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits advertised to user space; initialised in setup_processor(). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
99
100
/*
 * When the kernel is built for multiple CPU/TLB/user/cache types,
 * these method tables are filled in by setup_processor() from the
 * matching proc_info_list entry.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
/* Outer (e.g. L2) cache maintenance ops, filled in by the cache driver. */
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
124
/* Small per-mode stacks for the IRQ, abort and undef exception handlers;
 * installed per CPU by cpu_init(). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string (e.g. "v7l") exported to user space; built in setup_processor(). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* human-readable CPU name from proc_info_list */
static const char *machine_name;	/* board name from the machine_desc */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/*
 * Endianness probe: casting the long to char keeps its least
 * significant byte, which is 'l' on little-endian and 'b' on
 * big-endian layouts of the initialiser.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
145
/*
 * Standard memory resources
 *
 * start/end are placeholders here; request_standard_resources() fills
 * them in and registers the entries under iomem_resource.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel port I/O ranges, reserved only when the
 * machine_desc asks for them (reserve_lp0/1/2). */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
198
/*
 * Architecture version strings, indexed by the CPU_ARCH_* values
 * returned from cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
218
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Older CPUs encode the architecture directly in the MIDR;
 * CPUs using the revised CPUID format (architecture field == 0xf) are
 * classified by probing ID_MMFR0 for the VMSA/PMSA version instead.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 parts: no architecture information available */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes v4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* post-ARM7: architecture field in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
251
/*
 * Return the cached CPU architecture (CPU_ARCH_*).  Only valid after
 * setup_processor() has populated __cpu_architecture; calling it
 * earlier trips the BUG_ON.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
258
/*
 * Report whether the instruction cache can alias, i.e. whether a
 * physical address may be indexed at more than one virtual position.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache via CSSELR, then read its CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliasing possible once sets * line size exceed a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* cache type register bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
291
/*
 * Classify the data and instruction caches (VIVT / VIPT / PIPT and
 * their aliasing properties) from the cache type register, setting the
 * global 'cacheid' flags, then report the result.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* bits [15:14]: L1 instruction cache policy */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 register format; bit 23 flags an aliasing cache */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* pre-v6 CPUs have VIVT caches */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
333
334 /*
335  * These functions re-use the assembly code in head.S, which
336  * already provide the required functionality.
337  */
338 extern struct proc_info_list *lookup_processor_type(unsigned int);
339
/*
 * printf()-style output usable very early in boot: the message (at
 * most 256 bytes) goes to the low-level debug UART via printascii()
 * when CONFIG_DEBUG_LL is enabled, and to printk() in any case.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
355
356 static void __init feat_v6_fixup(void)
357 {
358         int id = read_cpuid_id();
359
360         if ((id & 0xff0f0000) != 0x41070000)
361                 return;
362
363         /*
364          * HWCAP_TLS is available only on 1136 r1p0 and later,
365          * see also kuser_get_tls_init.
366          */
367         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
368                 elf_hwcap &= ~HWCAP_TLS;
369 }
370
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undef: switch into that mode with
	 * IRQs/FIQs masked, point sp into this CPU's 'stacks' entry at
	 * the matching field, then finish back in SVC mode.  r14 is
	 * used as scratch and is listed as clobbered.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
423
/* Map from logical CPU number to physical CPU id. */
int __cpu_logical_map[NR_CPUS];

/*
 * Make the booting CPU logical CPU 0: its physical id (low byte of
 * MPIDR when SMP) becomes cpu_logical_map(0), and the CPU that would
 * otherwise occupy that slot is swapped into the boot CPU's old slot.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

	cpu_logical_map(0) = cpu;
	for (i = 1; i < NR_CPUS; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}
437
/*
 * Identify the boot CPU: look up its proc_info_list entry, install the
 * per-CPU-type method tables, and initialise global CPU state
 * (architecture, hwcaps, cache type, exception stacks).
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the method tables for multi-CPU-type kernels. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Build "armv7l"-style strings for uname and the ELF platform. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
488
489 void __init dump_machine_table(void)
490 {
491         struct machine_desc *p;
492
493         early_print("Available machine support:\n\nID (hex)\tNAME\n");
494         for_each_machine_desc(p)
495                 early_print("%08x\t%s\n", p->nr, p->name);
496
497         early_print("\nPlease check your kernel config and/or bootloader.\n");
498
499         while (true)
500                 /* can't use cpu_relax() here as it may require MMU setup */;
501 }
502
503 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
504 {
505         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
506
507         if (meminfo.nr_banks >= NR_BANKS) {
508                 printk(KERN_CRIT "NR_BANKS too low, "
509                         "ignoring memory at 0x%08llx\n", (long long)start);
510                 return -EINVAL;
511         }
512
513         /*
514          * Ensure that start/size are aligned to a page boundary.
515          * Size is appropriately rounded down, start is rounded up.
516          */
517         size -= start & ~PAGE_MASK;
518         bank->start = PAGE_ALIGN(start);
519
520 #ifndef CONFIG_LPAE
521         if (bank->start + size < bank->start) {
522                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
523                         "32-bit physical address space\n", (long long)start);
524                 /*
525                  * To ensure bank->start + bank->size is representable in
526                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
527                  * This means we lose a page after masking.
528                  */
529                 size = ULONG_MAX - bank->start;
530         }
531 #endif
532
533         bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
534
535         /*
536          * Check whether this memory region has non-zero size or
537          * invalid node number.
538          */
539         if (bank->size == 0)
540                 return -EINVAL;
541
542         meminfo.nr_banks++;
543         return 0;
544 }
545
546 /*
547  * Pick out the memory size.  We look for mem=size@start,
548  * where start and size are "size[KkMm]"
549  */
550 static int __init early_mem(char *p)
551 {
552         static int usermem __initdata = 0;
553         phys_addr_t size;
554         phys_addr_t start;
555         char *endp;
556
557         /*
558          * If the user specifies memory size, we
559          * blow away any automatically generated
560          * size.
561          */
562         if (usermem == 0) {
563                 usermem = 1;
564                 meminfo.nr_banks = 0;
565         }
566
567         start = PHYS_OFFSET;
568         size  = memparse(p, &endp);
569         if (*endp == '@')
570                 start = memparse(endp + 1, NULL);
571
572         arm_add_memory(start, size);
573
574         return 0;
575 }
576 early_param("mem", early_mem);
577
/*
 * Register the standard resources: one "System RAM" entry per memblock
 * region (with kernel code/data nested inside the region containing
 * them), optional video RAM, and the legacy parallel port I/O ranges
 * the board asks to reserve.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest code/data under the RAM region that holds them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
622
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 VGA text-mode description consumed by the console layer. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
633
634 static int __init customize_machine(void)
635 {
636         /* customizes platform devices, or adds new ones */
637         if (machine_desc->init_machine)
638                 machine_desc->init_machine();
639         return 0;
640 }
641 arch_initcall(customize_machine);
642
643 static int __init init_machine_late(void)
644 {
645         if (machine_desc->init_late)
646                 machine_desc->init_late();
647         return 0;
648 }
649 late_initcall(init_machine_late);
650
#ifdef CONFIG_KEXEC
/* Total lowmem in bytes; used as the memory-size input to crashkernel= parsing. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* non-zero: no crashkernel= parameter (or a parse error) — nothing to do */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reservation so it shows up under /proc/iomem. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
699
700 static int __init meminfo_cmp(const void *_a, const void *_b)
701 {
702         const struct membank *a = _a, *b = _b;
703         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
704         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
705 }
706
/*
 * Report which privilege mode the CPUs entered the kernel in.  Only
 * meaningful with CONFIG_ARM_VIRT_EXT; __boot_cpu_mode is presumably
 * recorded by the early boot code — see head.S (TODO confirm).
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
721
/*
 * Architecture-specific boot-time setup, called from start_kernel().
 * Ordering matters: the CPU is identified before the machine record is
 * looked up, the command line is parsed before memory is sorted and
 * sanity-checked, and paging is initialised before resources are
 * claimed.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG-based boot. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Sort banks by start address before checking/registering them. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* On SMP the check presumably runs after secondary bringup — TODO confirm. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
790
791
792 static int __init topology_init(void)
793 {
794         int cpu;
795
796         for_each_possible_cpu(cpu) {
797                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
798                 cpuinfo->cpu.hotpluggable = 1;
799                 register_cpu(&cpuinfo->cpu, cpu);
800         }
801
802         return 0;
803 }
804 subsys_initcall(topology_init);
805
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory; fail with -ENOMEM if that's not possible. */
static int __init proc_cpu_init(void)
{
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
818
/*
 * Feature names shown in /proc/cpuinfo.  The index of each entry must
 * match its HWCAP_* bit number: c_show() walks elf_hwcap bit by bit
 * against this table.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
841
/*
 * Produce the body of /proc/cpuinfo: per-CPU BogoMIPS lines, the
 * decoded feature and CPUID fields, and the board identification.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* MIDR layout differs by CPU generation; mirror __get_cpu_architecture(). */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
904
/*
 * seq_file iterator for /proc/cpuinfo: the file is a single "record"
 * at position 0, so start yields one non-NULL token and next ends the
 * sequence.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
	/* nothing to release */
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};