xref: /linux/arch/arm/kernel/setup.c (revision db10cb9b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/kernel/setup.c
4  *
5  *  Copyright (C) 1995-2001 Russell King
6  */
7 #include <linux/efi.h>
8 #include <linux/export.h>
9 #include <linux/kernel.h>
10 #include <linux/stddef.h>
11 #include <linux/ioport.h>
12 #include <linux/delay.h>
13 #include <linux/utsname.h>
14 #include <linux/initrd.h>
15 #include <linux/console.h>
16 #include <linux/seq_file.h>
17 #include <linux/screen_info.h>
18 #include <linux/of_platform.h>
19 #include <linux/init.h>
20 #include <linux/kexec.h>
21 #include <linux/libfdt.h>
22 #include <linux/of_fdt.h>
23 #include <linux/cpu.h>
24 #include <linux/interrupt.h>
25 #include <linux/smp.h>
26 #include <linux/proc_fs.h>
27 #include <linux/memblock.h>
28 #include <linux/bug.h>
29 #include <linux/compiler.h>
30 #include <linux/sort.h>
31 #include <linux/psci.h>
32 
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/efi.h>
38 #include <asm/elf.h>
39 #include <asm/early_ioremap.h>
40 #include <asm/fixmap.h>
41 #include <asm/procinfo.h>
42 #include <asm/psci.h>
43 #include <asm/sections.h>
44 #include <asm/setup.h>
45 #include <asm/smp_plat.h>
46 #include <asm/mach-types.h>
47 #include <asm/cacheflush.h>
48 #include <asm/cachetype.h>
49 #include <asm/tlbflush.h>
50 #include <asm/xen/hypervisor.h>
51 
52 #include <asm/prom.h>
53 #include <asm/mach/arch.h>
54 #include <asm/mach/irq.h>
55 #include <asm/mach/time.h>
56 #include <asm/system_info.h>
57 #include <asm/system_misc.h>
58 #include <asm/traps.h>
59 #include <asm/unwind.h>
60 #include <asm/memblock.h>
61 #include <asm/virt.h>
62 #include <asm/kasan.h>
63 
64 #include "atags.h"
65 
66 
67 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
68 char fpe_type[8];
69 
70 static int __init fpe_setup(char *line)
71 {
72 	memcpy(fpe_type, line, 8);
73 	return 1;
74 }
75 
76 __setup("fpe=", fpe_setup);
77 #endif
78 
79 unsigned int processor_id;
80 EXPORT_SYMBOL(processor_id);
81 unsigned int __machine_arch_type __read_mostly;
82 EXPORT_SYMBOL(__machine_arch_type);
83 unsigned int cacheid __read_mostly;
84 EXPORT_SYMBOL(cacheid);
85 
86 unsigned int __atags_pointer __initdata;
87 
88 unsigned int system_rev;
89 EXPORT_SYMBOL(system_rev);
90 
91 const char *system_serial;
92 EXPORT_SYMBOL(system_serial);
93 
94 unsigned int system_serial_low;
95 EXPORT_SYMBOL(system_serial_low);
96 
97 unsigned int system_serial_high;
98 EXPORT_SYMBOL(system_serial_high);
99 
100 unsigned int elf_hwcap __read_mostly;
101 EXPORT_SYMBOL(elf_hwcap);
102 
103 unsigned int elf_hwcap2 __read_mostly;
104 EXPORT_SYMBOL(elf_hwcap2);
105 
106 
107 #ifdef MULTI_CPU
108 struct processor processor __ro_after_init;
109 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
110 struct processor *cpu_vtable[NR_CPUS] = {
111 	[0] = &processor,
112 };
113 #endif
114 #endif
115 #ifdef MULTI_TLB
116 struct cpu_tlb_fns cpu_tlb __ro_after_init;
117 #endif
118 #ifdef MULTI_USER
119 struct cpu_user_fns cpu_user __ro_after_init;
120 #endif
121 #ifdef MULTI_CACHE
122 struct cpu_cache_fns cpu_cache __ro_after_init;
123 #endif
124 #ifdef CONFIG_OUTER_CACHE
125 struct outer_cache_fns outer_cache __ro_after_init;
126 EXPORT_SYMBOL(outer_cache);
127 #endif
128 
129 /*
130  * Cached cpu_architecture() result for use by assembler code.
131  * C code should use the cpu_architecture() function instead of accessing this
132  * variable directly.
133  */
134 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
135 
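/*
 * Small per-CPU stacks for the IRQ, abort, undefined and FIQ exception
 * modes.  The exception entry code only parks a few registers here
 * before switching to the SVC-mode stack, so four words per mode are
 * enough.
 */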
136 struct stack {
137 	u32 irq[4];
138 	u32 abt[4];
139 	u32 und[4];
140 	u32 fiq[4];
141 } ____cacheline_aligned;
142 
143 #ifndef CONFIG_CPU_V7M
144 static struct stack stacks[NR_CPUS];
145 #endif
146 
147 char elf_platform[ELF_PLATFORM_SIZE];
148 EXPORT_SYMBOL(elf_platform);
149 
150 static const char *cpu_name;
151 static const char *machine_name;
152 static char __initdata cmd_line[COMMAND_LINE_SIZE];
153 const struct machine_desc *machine_desc __initdata;
154 
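/*
 * Reading the first byte of this union yields 'l' on a little-endian
 * kernel and 'b' on a big-endian one; ENDIANNESS is appended to the
 * machine and ELF platform strings in setup_processor().
 */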
155 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
156 #define ENDIANNESS ((char)endian_test.l)
157 
158 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
159 
160 /*
161  * Standard memory resources
162  */
163 static struct resource mem_res[] = {
164 	{
165 		.name = "Video RAM",
166 		.start = 0,
167 		.end = 0,
168 		.flags = IORESOURCE_MEM
169 	},
170 	{
171 		.name = "Kernel code",
172 		.start = 0,
173 		.end = 0,
174 		.flags = IORESOURCE_SYSTEM_RAM
175 	},
176 	{
177 		.name = "Kernel data",
178 		.start = 0,
179 		.end = 0,
180 		.flags = IORESOURCE_SYSTEM_RAM
181 	}
182 };
183 
184 #define video_ram   mem_res[0]
185 #define kernel_code mem_res[1]
186 #define kernel_data mem_res[2]
187 
188 static struct resource io_res[] = {
189 	{
190 		.name = "reserved",
191 		.start = 0x3bc,
192 		.end = 0x3be,
193 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
194 	},
195 	{
196 		.name = "reserved",
197 		.start = 0x378,
198 		.end = 0x37f,
199 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
200 	},
201 	{
202 		.name = "reserved",
203 		.start = 0x278,
204 		.end = 0x27f,
205 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
206 	}
207 };
208 
209 #define lp0 io_res[0]
210 #define lp1 io_res[1]
211 #define lp2 io_res[2]
212 
213 static const char *proc_arch[] = {
214 	"undefined/unknown",
215 	"3",
216 	"4",
217 	"4T",
218 	"5",
219 	"5T",
220 	"5TE",
221 	"5TEJ",
222 	"6TEJ",
223 	"7",
224 	"7M",
225 	"?(12)",
226 	"?(13)",
227 	"?(14)",
228 	"?(15)",
229 	"?(16)",
230 	"?(17)",
231 };
232 
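/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Cores using the CPUID scheme (architecture field 0xf) are
 * classified by reading MMFR0 and checking for VMSAv7/PMSAv7 versus
 * VMSAv6/PMSAv6 support; CPU_V7M kernels simply report ARMv7-M.
 */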
233 #ifdef CONFIG_CPU_V7M
234 static int __get_cpu_architecture(void)
235 {
236 	return CPU_ARCH_ARMv7M;
237 }
238 #else
239 static int __get_cpu_architecture(void)
240 {
241 	int cpu_arch;
242 
243 	if ((read_cpuid_id() & 0x0008f000) == 0) {
244 		cpu_arch = CPU_ARCH_UNKNOWN;
245 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
246 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
247 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
248 		cpu_arch = (read_cpuid_id() >> 16) & 7;
249 		if (cpu_arch)
250 			cpu_arch += CPU_ARCH_ARMv3;
251 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
252 		/* Revised CPUID format. Read the Memory Model Feature
253 		 * Register 0 and check for VMSAv7 or PMSAv7 */
254 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
255 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
256 		    (mmfr0 & 0x000000f0) >= 0x00000030)
257 			cpu_arch = CPU_ARCH_ARMv7;
258 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
259 			 (mmfr0 & 0x000000f0) == 0x00000020)
260 			cpu_arch = CPU_ARCH_ARMv6;
261 		else
262 			cpu_arch = CPU_ARCH_UNKNOWN;
263 	} else
264 		cpu_arch = CPU_ARCH_UNKNOWN;
265 
266 	return cpu_arch;
267 }
268 #endif
269 
270 int __pure cpu_architecture(void)
271 {
272 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
273 
274 	return __cpu_architecture;
275 }
276 
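/*
 * Determine whether the level 1 instruction cache can alias: on ARMv7 a
 * VIPT I-cache aliases when one cache way (line size * number of sets)
 * is larger than PAGE_SIZE; on ARMv6 an aliasing bit in the cache type
 * register is checked instead.
 */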
277 static int cpu_has_aliasing_icache(unsigned int arch)
278 {
279 	int aliasing_icache;
280 	unsigned int id_reg, num_sets, line_size;
281 
282 	/* PIPT caches never alias. */
283 	if (icache_is_pipt())
284 		return 0;
285 
286 	/* arch specifies the register format */
287 	switch (arch) {
288 	case CPU_ARCH_ARMv7:
289 		set_csselr(CSSELR_ICACHE | CSSELR_L1);
290 		isb();
291 		id_reg = read_ccsidr();
292 		line_size = 4 << ((id_reg & 0x7) + 2);
293 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
294 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
295 		break;
296 	case CPU_ARCH_ARMv6:
297 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
298 		break;
299 	default:
300 		/* I-cache aliases will be handled by D-cache aliasing code */
301 		aliasing_icache = 0;
302 	}
303 
304 	return aliasing_icache;
305 }
306 
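/*
 * Classify the data and instruction caches (VIVT, VIPT aliasing or
 * non-aliasing, PIPT, ASID-tagged) from the cache type register and
 * record the result in the global cacheid, which the cache maintenance
 * and mm code consult through the cache_is_*()/icache_is_*() helpers.
 */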
307 static void __init cacheid_init(void)
308 {
309 	unsigned int arch = cpu_architecture();
310 
311 	if (arch >= CPU_ARCH_ARMv6) {
312 		unsigned int cachetype = read_cpuid_cachetype();
313 
314 		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
315 			cacheid = 0;
316 		} else if ((cachetype & (7 << 29)) == 4 << 29) {
317 			/* ARMv7 register format */
318 			arch = CPU_ARCH_ARMv7;
319 			cacheid = CACHEID_VIPT_NONALIASING;
320 			switch (cachetype & (3 << 14)) {
321 			case (1 << 14):
322 				cacheid |= CACHEID_ASID_TAGGED;
323 				break;
324 			case (3 << 14):
325 				cacheid |= CACHEID_PIPT;
326 				break;
327 			}
328 		} else {
329 			arch = CPU_ARCH_ARMv6;
330 			if (cachetype & (1 << 23))
331 				cacheid = CACHEID_VIPT_ALIASING;
332 			else
333 				cacheid = CACHEID_VIPT_NONALIASING;
334 		}
335 		if (cpu_has_aliasing_icache(arch))
336 			cacheid |= CACHEID_VIPT_I_ALIASING;
337 	} else {
338 		cacheid = CACHEID_VIVT;
339 	}
340 
341 	pr_info("CPU: %s data cache, %s instruction cache\n",
342 		cache_is_vivt() ? "VIVT" :
343 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
344 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
345 		cache_is_vivt() ? "VIVT" :
346 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
347 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
348 		icache_is_pipt() ? "PIPT" :
349 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
350 }
351 
352 /*
353  * These functions re-use the assembly code in head.S, which
354  * already provides the required functionality.
355  */
356 extern struct proc_info_list *lookup_processor_type(unsigned int);
357 
358 void __init early_print(const char *str, ...)
359 {
360 	extern void printascii(const char *);
361 	char buf[256];
362 	va_list ap;
363 
364 	va_start(ap, str);
365 	vsnprintf(buf, sizeof(buf), str, ap);
366 	va_end(ap);
367 
368 #ifdef CONFIG_DEBUG_LL
369 	printascii(buf);
370 #endif
371 	printk("%s", buf);
372 }
373 
374 #ifdef CONFIG_ARM_PATCH_IDIV
375 
376 static inline u32 __attribute_const__ sdiv_instruction(void)
377 {
378 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
379 		/* "sdiv r0, r0, r1" */
380 		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
381 		return __opcode_to_mem_thumb32(insn);
382 	}
383 
384 	/* "sdiv r0, r0, r1" */
385 	return __opcode_to_mem_arm(0xe710f110);
386 }
387 
388 static inline u32 __attribute_const__ udiv_instruction(void)
389 {
390 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
391 		/* "udiv r0, r0, r1" */
392 		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
393 		return __opcode_to_mem_thumb32(insn);
394 	}
395 
396 	/* "udiv r0, r0, r1" */
397 	return __opcode_to_mem_arm(0xe730f110);
398 }
399 
400 static inline u32 __attribute_const__ bx_lr_instruction(void)
401 {
402 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
403 		/* "bx lr; nop" */
404 		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
405 		return __opcode_to_mem_thumb32(insn);
406 	}
407 
408 	/* "bx lr" */
409 	return __opcode_to_mem_arm(0xe12fff1e);
410 }
411 
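/*
 * If the CPU implements hardware integer divide, patch the runtime
 * library helpers __aeabi_uidiv/__aeabi_idiv in place with a single
 * udiv/sdiv instruction followed by a return, so that in-kernel
 * divisions no longer go through the software division routine.
 */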
412 static void __init patch_aeabi_idiv(void)
413 {
414 	extern void __aeabi_uidiv(void);
415 	extern void __aeabi_idiv(void);
416 	uintptr_t fn_addr;
417 	unsigned int mask;
418 
419 	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
420 	if (!(elf_hwcap & mask))
421 		return;
422 
423 	pr_info("CPU: div instructions available: patching division code\n");
424 
425 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
426 	asm ("" : "+g" (fn_addr));
427 	((u32 *)fn_addr)[0] = udiv_instruction();
428 	((u32 *)fn_addr)[1] = bx_lr_instruction();
429 	flush_icache_range(fn_addr, fn_addr + 8);
430 
431 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
432 	asm ("" : "+g" (fn_addr));
433 	((u32 *)fn_addr)[0] = sdiv_instruction();
434 	((u32 *)fn_addr)[1] = bx_lr_instruction();
435 	flush_icache_range(fn_addr, fn_addr + 8);
436 }
437 
438 #else
439 static inline void patch_aeabi_idiv(void) { }
440 #endif
441 
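/*
 * On ARMv7 and later, probe the CPUID feature registers (ISAR0, MMFR0,
 * ISAR5, ISAR6, PFR2) for optional instructions (hardware divide, LPAE,
 * the v8 Crypto and CRC32 extensions, SB, SSBS) and set the matching
 * HWCAP/HWCAP2 bits that are reported to userspace.
 */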
442 static void __init cpuid_init_hwcaps(void)
443 {
444 	int block;
445 	u32 isar5;
446 	u32 isar6;
447 	u32 pfr2;
448 
449 	if (cpu_architecture() < CPU_ARCH_ARMv7)
450 		return;
451 
452 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
453 	if (block >= 2)
454 		elf_hwcap |= HWCAP_IDIVA;
455 	if (block >= 1)
456 		elf_hwcap |= HWCAP_IDIVT;
457 
458 	/* LPAE implies atomic ldrd/strd instructions */
459 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
460 	if (block >= 5)
461 		elf_hwcap |= HWCAP_LPAE;
462 
463 	/* check for supported v8 Crypto instructions */
464 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
465 
466 	block = cpuid_feature_extract_field(isar5, 4);
467 	if (block >= 2)
468 		elf_hwcap2 |= HWCAP2_PMULL;
469 	if (block >= 1)
470 		elf_hwcap2 |= HWCAP2_AES;
471 
472 	block = cpuid_feature_extract_field(isar5, 8);
473 	if (block >= 1)
474 		elf_hwcap2 |= HWCAP2_SHA1;
475 
476 	block = cpuid_feature_extract_field(isar5, 12);
477 	if (block >= 1)
478 		elf_hwcap2 |= HWCAP2_SHA2;
479 
480 	block = cpuid_feature_extract_field(isar5, 16);
481 	if (block >= 1)
482 		elf_hwcap2 |= HWCAP2_CRC32;
483 
484 	/* Check for Speculation barrier instruction */
485 	isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
486 	block = cpuid_feature_extract_field(isar6, 12);
487 	if (block >= 1)
488 		elf_hwcap2 |= HWCAP2_SB;
489 
490 	/* Check for Speculative Store Bypassing control */
491 	pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
492 	block = cpuid_feature_extract_field(pfr2, 4);
493 	if (block >= 1)
494 		elf_hwcap2 |= HWCAP2_SSBS;
495 }
496 
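/*
 * Remove hwcap bits that the CPU advertises but that userspace should
 * not rely on: TLS on ARM1136 prior to r1p0, and SWP on cores where the
 * exclusive load/store instructions mean SWP may not be atomic.
 */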
497 static void __init elf_hwcap_fixup(void)
498 {
499 	unsigned id = read_cpuid_id();
500 
501 	/*
502 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
503 	 * see also kuser_get_tls_init.
504 	 */
505 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
506 	    ((id >> 20) & 3) == 0) {
507 		elf_hwcap &= ~HWCAP_TLS;
508 		return;
509 	}
510 
511 	/* Verify that the CPUID scheme is implemented */
512 	if ((id & 0x000f0000) != 0x000f0000)
513 		return;
514 
515 	/*
516 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
517 	 * avoid advertising SWP; it may not be atomic with
518 	 * multiprocessing cores.
519 	 */
520 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
521 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
522 	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
523 		elf_hwcap &= ~HWCAP_SWP;
524 }
525 
526 /*
527  * cpu_init - initialise one CPU.
528  *
529  * cpu_init sets up the per-CPU stacks.
530  */
531 void notrace cpu_init(void)
532 {
533 #ifndef CONFIG_CPU_V7M
534 	unsigned int cpu = smp_processor_id();
535 	struct stack *stk = &stacks[cpu];
536 
537 	if (cpu >= NR_CPUS) {
538 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
539 		BUG();
540 	}
541 
542 	/*
543 	 * This only works on resume and secondary cores. For booting on the
544 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
545 	 */
546 	set_my_cpu_offset(per_cpu_offset(cpu));
547 
548 	cpu_proc_init();
549 
550 	/*
551 	 * Define the placement constraint for the inline asm directive below.
552 	 * In Thumb-2, msr with an immediate value is not allowed.
553 	 */
554 #ifdef CONFIG_THUMB2_KERNEL
555 #define PLC_l	"l"
556 #define PLC_r	"r"
557 #else
558 #define PLC_l	"I"
559 #define PLC_r	"I"
560 #endif
561 
562 	/*
563 	 * setup stacks for re-entrant exception handlers
564 	 */
565 	__asm__ (
566 	"msr	cpsr_c, %1\n\t"
567 	"add	r14, %0, %2\n\t"
568 	"mov	sp, r14\n\t"
569 	"msr	cpsr_c, %3\n\t"
570 	"add	r14, %0, %4\n\t"
571 	"mov	sp, r14\n\t"
572 	"msr	cpsr_c, %5\n\t"
573 	"add	r14, %0, %6\n\t"
574 	"mov	sp, r14\n\t"
575 	"msr	cpsr_c, %7\n\t"
576 	"add	r14, %0, %8\n\t"
577 	"mov	sp, r14\n\t"
578 	"msr	cpsr_c, %9"
579 	    :
580 	    : "r" (stk),
581 	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
582 	      "I" (offsetof(struct stack, irq[0])),
583 	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
584 	      "I" (offsetof(struct stack, abt[0])),
585 	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
586 	      "I" (offsetof(struct stack, und[0])),
587 	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
588 	      "I" (offsetof(struct stack, fiq[0])),
589 	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
590 	    : "r14");
591 #endif
592 }
593 
594 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
595 
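/*
 * Called very early from start_kernel(): record the booting CPU's MPIDR
 * (affinity level 0) as logical CPU 0 and give the remaining slots
 * provisional logical IDs; the map is refined later from the device
 * tree / platform SMP code.
 */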
596 void __init smp_setup_processor_id(void)
597 {
598 	int i;
599 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
600 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
601 
602 	cpu_logical_map(0) = cpu;
603 	for (i = 1; i < nr_cpu_ids; ++i)
604 		cpu_logical_map(i) = i == cpu ? 0 : i;
605 
606 	/*
607 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
608 	 * by using a percpu variable too early; for example, lockdep
609 	 * will access a percpu variable inside lock_release.
610 	 */
611 	set_my_cpu_offset(0);
612 
613 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
614 }
615 
616 struct mpidr_hash mpidr_hash;
617 #ifdef CONFIG_SMP
618 /**
619  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
620  *			  level in order to build a linear index from an
621  *			  MPIDR value. The resulting algorithm is a
622  *			  collision-free hash carried out through shifting and ORing.
623  */
624 static void __init smp_build_mpidr_hash(void)
625 {
626 	u32 i, affinity;
627 	u32 fs[3], bits[3], ls, mask = 0;
628 	/*
629 	 * Pre-scan the list of MPIDRs and filter out bits that do
630 	 * not contribute to affinity levels, i.e. they never toggle.
631 	 */
632 	for_each_possible_cpu(i)
633 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
634 	pr_debug("mask of set bits 0x%x\n", mask);
635 	/*
636 	 * Find and stash the last and first bit set at all affinity levels to
637 	 * check how many bits are required to represent them.
638 	 */
639 	for (i = 0; i < 3; i++) {
640 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
641 		/*
642 		 * Find the MSB and LSB positions
643 		 * to determine how many bits are required
644 		 * to express the affinity level.
645 		 */
646 		ls = fls(affinity);
647 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
648 		bits[i] = ls - fs[i];
649 	}
650 	/*
651 	 * An index can be created from the MPIDR by isolating the
652 	 * significant bits at each affinity level and by shifting
653 	 * them in order to compress the 24-bit value space into a
654 	 * compact set of values. This is equivalent to hashing
655 	 * the MPIDR through shifting and ORing. It is a collision-free
656 	 * hash, though not minimal, since some levels might contain a
657 	 * number of CPUs that is not an exact power of 2 and their bit
658 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
659 	 */
660 	mpidr_hash.shift_aff[0] = fs[0];
661 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
662 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
663 						(bits[1] + bits[0]);
664 	mpidr_hash.mask = mask;
665 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
666 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
667 				mpidr_hash.shift_aff[0],
668 				mpidr_hash.shift_aff[1],
669 				mpidr_hash.shift_aff[2],
670 				mpidr_hash.mask,
671 				mpidr_hash.bits);
672 	/*
673 	 * 4x is an arbitrary value used to warn on a hash table much bigger
674 	 * than expected on most systems.
675 	 */
676 	if (mpidr_hash_size() > 4 * num_possible_cpus())
677 		pr_warn("Large number of MPIDR hash buckets detected\n");
678 	sync_cache_w(&mpidr_hash);
679 }
680 #endif
681 
682 /*
683  * locate processor in the list of supported processor types.  The linker
684  * builds this table for us from the entries in arch/arm/mm/proc-*.S
685  */
686 struct proc_info_list *lookup_processor(u32 midr)
687 {
688 	struct proc_info_list *list = lookup_processor_type(midr);
689 
690 	if (!list) {
691 		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
692 		       smp_processor_id(), midr);
693 		while (1)
694 		/* can't use cpu_relax() here as it may require MMU setup */;
695 	}
696 
697 	return list;
698 }
699 
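/*
 * Identify the booting CPU from its main ID register, install the
 * per-CPU processor, TLB, user and cache function vectors from the
 * matching proc_info entry, and initialise hwcaps, the cache type and
 * the exception mode stacks.
 */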
700 static void __init setup_processor(void)
701 {
702 	unsigned int midr = read_cpuid_id();
703 	struct proc_info_list *list = lookup_processor(midr);
704 
705 	cpu_name = list->cpu_name;
706 	__cpu_architecture = __get_cpu_architecture();
707 
708 	init_proc_vtable(list->proc);
709 #ifdef MULTI_TLB
710 	cpu_tlb = *list->tlb;
711 #endif
712 #ifdef MULTI_USER
713 	cpu_user = *list->user;
714 #endif
715 #ifdef MULTI_CACHE
716 	cpu_cache = *list->cache;
717 #endif
718 
719 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
720 		list->cpu_name, midr, midr & 15,
721 		proc_arch[cpu_architecture()], get_cr());
722 
723 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
724 		 list->arch_name, ENDIANNESS);
725 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
726 		 list->elf_name, ENDIANNESS);
727 	elf_hwcap = list->elf_hwcap;
728 
729 	cpuid_init_hwcaps();
730 	patch_aeabi_idiv();
731 
732 #ifndef CONFIG_ARM_THUMB
733 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
734 #endif
735 #ifdef CONFIG_MMU
736 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
737 #endif
738 	erratum_a15_798181_init();
739 
740 	elf_hwcap_fixup();
741 
742 	cacheid_init();
743 	cpu_init();
744 }
745 
746 void __init dump_machine_table(void)
747 {
748 	const struct machine_desc *p;
749 
750 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
751 	for_each_machine_desc(p)
752 		early_print("%08x\t%s\n", p->nr, p->name);
753 
754 	early_print("\nPlease check your kernel config and/or bootloader.\n");
755 
756 	while (true)
757 		/* can't use cpu_relax() here as it may require MMU setup */;
758 }
759 
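/*
 * Register a block of physical memory with memblock.  The range is
 * page-aligned (start rounded up, size rounded down), clamped to the
 * 32-bit physical address space when phys_addr_t is 32 bits wide, and
 * anything below PHYS_OFFSET is discarded.
 */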
760 int __init arm_add_memory(u64 start, u64 size)
761 {
762 	u64 aligned_start;
763 
764 	/*
765 	 * Ensure that start/size are aligned to a page boundary.
766 	 * Size is rounded down, start is rounded up.
767 	 */
768 	aligned_start = PAGE_ALIGN(start);
769 	if (aligned_start > start + size)
770 		size = 0;
771 	else
772 		size -= aligned_start - start;
773 
774 #ifndef CONFIG_PHYS_ADDR_T_64BIT
775 	if (aligned_start > ULONG_MAX) {
776 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
777 			start);
778 		return -EINVAL;
779 	}
780 
781 	if (aligned_start + size > ULONG_MAX) {
782 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
783 			(long long)start);
784 		/*
785 		 * To ensure bank->start + bank->size is representable in
786 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
787 		 * This means we lose a page after masking.
788 		 */
789 		size = ULONG_MAX - aligned_start;
790 	}
791 #endif
792 
793 	if (aligned_start < PHYS_OFFSET) {
794 		if (aligned_start + size <= PHYS_OFFSET) {
795 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
796 				aligned_start, aligned_start + size);
797 			return -EINVAL;
798 		}
799 
800 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
801 			aligned_start, (u64)PHYS_OFFSET);
802 
803 		size -= PHYS_OFFSET - aligned_start;
804 		aligned_start = PHYS_OFFSET;
805 	}
806 
807 	start = aligned_start;
808 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
809 
810 	/*
811 	 * Check whether this memory region still has a non-zero
812 	 * size after the alignment adjustments above.
813 	 */
814 	if (size == 0)
815 		return -EINVAL;
816 
817 	memblock_add(start, size);
818 	return 0;
819 }
820 
821 /*
822  * Pick out the memory size.  We look for mem=size@start,
823  * where start and size are "size[KkMm]"
824  */
825 
826 static int __init early_mem(char *p)
827 {
828 	static int usermem __initdata = 0;
829 	u64 size;
830 	u64 start;
831 	char *endp;
832 
833 	/*
834 	 * If the user specifies memory size, we
835 	 * blow away any automatically generated
836 	 * size.
837 	 */
838 	if (usermem == 0) {
839 		usermem = 1;
840 		memblock_remove(memblock_start_of_DRAM(),
841 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
842 	}
843 
844 	start = PHYS_OFFSET;
845 	size  = memparse(p, &endp);
846 	if (*endp == '@')
847 		start = memparse(endp + 1, NULL);
848 
849 	arm_add_memory(start, size);
850 
851 	return 0;
852 }
853 early_param("mem", early_mem);
854 
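/*
 * Populate the resource tree: register every memblock range as
 * "System RAM" (plus any boot-time identity-mapped alias advertised to
 * kexec-tools), nest the kernel code/data resources inside it, and
 * claim the optional video RAM and legacy parallel port regions the
 * machine descriptor asks for.
 */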
855 static void __init request_standard_resources(const struct machine_desc *mdesc)
856 {
857 	phys_addr_t start, end, res_end;
858 	struct resource *res;
859 	u64 i;
860 
861 	kernel_code.start   = virt_to_phys(_text);
862 	kernel_code.end     = virt_to_phys(__init_begin - 1);
863 	kernel_data.start   = virt_to_phys(_sdata);
864 	kernel_data.end     = virt_to_phys(_end - 1);
865 
866 	for_each_mem_range(i, &start, &end) {
867 		unsigned long boot_alias_start;
868 
869 		/*
870 		 * In memblock, end points to the first byte after the
871 		 * range, while in resources, end points to the last byte in
872 		 * the range.
873 		 */
874 		res_end = end - 1;
875 
876 		/*
877 		 * Some systems have a special memory alias which is only
878 		 * used for booting.  We need to advertise this region to
879 		 * kexec-tools so they know where bootable RAM is located.
880 		 */
881 		boot_alias_start = phys_to_idmap(start);
882 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
883 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
884 			if (!res)
885 				panic("%s: Failed to allocate %zu bytes\n",
886 				      __func__, sizeof(*res));
887 			res->name = "System RAM (boot alias)";
888 			res->start = boot_alias_start;
889 			res->end = phys_to_idmap(res_end);
890 			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
891 			request_resource(&iomem_resource, res);
892 		}
893 
894 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
895 		if (!res)
896 			panic("%s: Failed to allocate %zu bytes\n", __func__,
897 			      sizeof(*res));
898 		res->name  = "System RAM";
899 		res->start = start;
900 		res->end = res_end;
901 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
902 
903 		request_resource(&iomem_resource, res);
904 
905 		if (kernel_code.start >= res->start &&
906 		    kernel_code.end <= res->end)
907 			request_resource(res, &kernel_code);
908 		if (kernel_data.start >= res->start &&
909 		    kernel_data.end <= res->end)
910 			request_resource(res, &kernel_data);
911 	}
912 
913 	if (mdesc->video_start) {
914 		video_ram.start = mdesc->video_start;
915 		video_ram.end   = mdesc->video_end;
916 		request_resource(&iomem_resource, &video_ram);
917 	}
918 
919 	/*
920 	 * Some machines can never have lp0, lp1 or lp2, so only
921 	 * reserve them when the machine descriptor asks for them.
922 	 */
923 	if (mdesc->reserve_lp0)
924 		request_resource(&ioport_resource, &lp0);
925 	if (mdesc->reserve_lp1)
926 		request_resource(&ioport_resource, &lp1);
927 	if (mdesc->reserve_lp2)
928 		request_resource(&ioport_resource, &lp2);
929 }
930 
931 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
932     defined(CONFIG_EFI)
933 struct screen_info screen_info = {
934  .orig_video_lines	= 30,
935  .orig_video_cols	= 80,
936  .orig_video_mode	= 0,
937  .orig_video_ega_bx	= 0,
938  .orig_video_isVGA	= 1,
939  .orig_video_points	= 8
940 };
941 #endif
942 
943 static int __init customize_machine(void)
944 {
945 	/*
946 	 * Customizes platform devices, or adds new ones.
947 	 * On DT-based machines, we fall back to populating the
948 	 * machine from the device tree if no callback is provided;
949 	 * otherwise we would always need an init_machine callback.
950 	 */
951 	if (machine_desc->init_machine)
952 		machine_desc->init_machine();
953 
954 	return 0;
955 }
956 arch_initcall(customize_machine);
957 
958 static int __init init_machine_late(void)
959 {
960 	struct device_node *root;
961 	int ret;
962 
963 	if (machine_desc->init_late)
964 		machine_desc->init_late();
965 
966 	root = of_find_node_by_path("/");
967 	if (root) {
968 		ret = of_property_read_string(root, "serial-number",
969 					      &system_serial);
970 		if (ret)
971 			system_serial = NULL;
972 	}
973 
974 	if (!system_serial)
975 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
976 					  system_serial_high,
977 					  system_serial_low);
978 
979 	return 0;
980 }
981 late_initcall(init_machine_late);
982 
983 #ifdef CONFIG_KEXEC
984 /*
985  * The crash region must be aligned to 128MB to avoid
986  * zImage relocating below the reserved region.
987  */
988 #define CRASH_ALIGN	(128 << 20)
989 
990 static inline unsigned long long get_total_mem(void)
991 {
992 	unsigned long total;
993 
994 	total = max_low_pfn - min_low_pfn;
995 	return total << PAGE_SHIFT;
996 }
997 
998 /**
999  * reserve_crashkernel() - reserve the memory area for the crash kernel
1000  *
1001  * This function reserves the memory area given by the "crashkernel=" kernel
1002  * command line parameter. The reserved memory is used by a dump capture
1003  * kernel when the primary kernel is crashing.
1004  */
1005 static void __init reserve_crashkernel(void)
1006 {
1007 	unsigned long long crash_size, crash_base;
1008 	unsigned long long total_mem;
1009 	int ret;
1010 
1011 	total_mem = get_total_mem();
1012 	ret = parse_crashkernel(boot_command_line, total_mem,
1013 				&crash_size, &crash_base);
1014 	/* invalid value specified or crashkernel=0 */
1015 	if (ret || !crash_size)
1016 		return;
1017 
1018 	if (crash_base <= 0) {
1019 		unsigned long long crash_max = idmap_to_phys((u32)~0);
1020 		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1021 		if (crash_max > lowmem_max)
1022 			crash_max = lowmem_max;
1023 
1024 		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
1025 						       CRASH_ALIGN, crash_max);
1026 		if (!crash_base) {
1027 			pr_err("crashkernel reservation failed - No suitable area found.\n");
1028 			return;
1029 		}
1030 	} else {
1031 		unsigned long long crash_max = crash_base + crash_size;
1032 		unsigned long long start;
1033 
1034 		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
1035 						  crash_base, crash_max);
1036 		if (!start) {
1037 			pr_err("crashkernel reservation failed - memory is in use.\n");
1038 			return;
1039 		}
1040 	}
1041 
1042 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1043 		(unsigned long)(crash_size >> 20),
1044 		(unsigned long)(crash_base >> 20),
1045 		(unsigned long)(total_mem >> 20));
1046 
1047 	/* The crashk resource must always be located in normal mem */
1048 	crashk_res.start = crash_base;
1049 	crashk_res.end = crash_base + crash_size - 1;
1050 	insert_resource(&iomem_resource, &crashk_res);
1051 
1052 	if (arm_has_idmap_alias()) {
1053 		/*
1054 		 * If we have a special RAM alias for use at boot, we
1055 		 * need to advertise to kexec tools where the alias is.
1056 		 */
1057 		static struct resource crashk_boot_res = {
1058 			.name = "Crash kernel (boot alias)",
1059 			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1060 		};
1061 
1062 		crashk_boot_res.start = phys_to_idmap(crash_base);
1063 		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1064 		insert_resource(&iomem_resource, &crashk_boot_res);
1065 	}
1066 }
1067 #else
1068 static inline void reserve_crashkernel(void) {}
1069 #endif /* CONFIG_KEXEC */
1070 
1071 void __init hyp_mode_check(void)
1072 {
1073 #ifdef CONFIG_ARM_VIRT_EXT
1074 	sync_boot_mode();
1075 
1076 	if (is_hyp_mode_available()) {
1077 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
1078 		pr_info("CPU: Virtualization extensions available.\n");
1079 	} else if (is_hyp_mode_mismatched()) {
1080 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1081 			__boot_cpu_mode & MODE_MASK);
1082 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1083 	} else
1084 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1085 #endif
1086 }
1087 
1088 static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
1089 
1090 static int arm_restart(struct notifier_block *nb, unsigned long action,
1091 		       void *data)
1092 {
1093 	__arm_pm_restart(action, data);
1094 	return NOTIFY_DONE;
1095 }
1096 
1097 static struct notifier_block arm_restart_nb = {
1098 	.notifier_call = arm_restart,
1099 	.priority = 128,
1100 };
1101 
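/*
 * The architecture-specific half of boot-time setup, called from
 * start_kernel(): identify the CPU and machine (from the DTB or ATAGS),
 * initialise memblock, the fixmap and paging, register the standard
 * resources, and prepare SMP operations and the crash kernel
 * reservation.
 */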
1102 void __init setup_arch(char **cmdline_p)
1103 {
1104 	const struct machine_desc *mdesc = NULL;
1105 	void *atags_vaddr = NULL;
1106 
1107 	if (__atags_pointer)
1108 		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
1109 
1110 	setup_processor();
1111 	if (atags_vaddr) {
1112 		mdesc = setup_machine_fdt(atags_vaddr);
1113 		if (mdesc)
1114 			memblock_reserve(__atags_pointer,
1115 					 fdt_totalsize(atags_vaddr));
1116 	}
1117 	if (!mdesc)
1118 		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
1119 	if (!mdesc) {
1120 		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
1121 		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
1122 			    __atags_pointer);
1123 		if (__atags_pointer)
1124 			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
1125 		dump_machine_table();
1126 	}
1127 
1128 	machine_desc = mdesc;
1129 	machine_name = mdesc->name;
1130 	dump_stack_set_arch_desc("%s", mdesc->name);
1131 
1132 	if (mdesc->reboot_mode != REBOOT_HARD)
1133 		reboot_mode = mdesc->reboot_mode;
1134 
1135 	setup_initial_init_mm(_text, _etext, _edata, _end);
1136 
1137 	/* populate cmd_line too for later use, preserving boot_command_line */
1138 	strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1139 	*cmdline_p = cmd_line;
1140 
1141 	early_fixmap_init();
1142 	early_ioremap_init();
1143 
1144 	parse_early_param();
1145 
1146 #ifdef CONFIG_MMU
1147 	early_mm_init(mdesc);
1148 #endif
1149 	setup_dma_zone(mdesc);
1150 	xen_early_init();
1151 	arm_efi_init();
1152 	/*
1153 	 * Make sure the lowmem/highmem split is calculated appropriately
1154 	 * before reserving or allocating any memory.
1155 	 */
1156 	adjust_lowmem_bounds();
1157 	arm_memblock_init(mdesc);
1158 	/* Memory may have been removed so recalculate the bounds. */
1159 	adjust_lowmem_bounds();
1160 
1161 	early_ioremap_reset();
1162 
1163 	paging_init(mdesc);
1164 	kasan_init();
1165 	request_standard_resources(mdesc);
1166 
1167 	if (mdesc->restart) {
1168 		__arm_pm_restart = mdesc->restart;
1169 		register_restart_handler(&arm_restart_nb);
1170 	}
1171 
1172 	unflatten_device_tree();
1173 
1174 	arm_dt_init_cpu_maps();
1175 	psci_dt_init();
1176 #ifdef CONFIG_SMP
1177 	if (is_smp()) {
1178 		if (!mdesc->smp_init || !mdesc->smp_init()) {
1179 			if (psci_smp_available())
1180 				smp_set_ops(&psci_smp_ops);
1181 			else if (mdesc->smp)
1182 				smp_set_ops(mdesc->smp);
1183 		}
1184 		smp_init_cpus();
1185 		smp_build_mpidr_hash();
1186 	}
1187 #endif
1188 
1189 	if (!is_smp())
1190 		hyp_mode_check();
1191 
1192 	reserve_crashkernel();
1193 
1194 #ifdef CONFIG_VT
1195 #if defined(CONFIG_VGA_CONSOLE)
1196 	conswitchp = &vga_con;
1197 #endif
1198 #endif
1199 
1200 	if (mdesc->init_early)
1201 		mdesc->init_early();
1202 }
1203 
1204 
1205 static int __init topology_init(void)
1206 {
1207 	int cpu;
1208 
1209 	for_each_possible_cpu(cpu) {
1210 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1211 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1212 		register_cpu(&cpuinfo->cpu, cpu);
1213 	}
1214 
1215 	return 0;
1216 }
1217 subsys_initcall(topology_init);
1218 
1219 #ifdef CONFIG_HAVE_PROC_CPU
1220 static int __init proc_cpu_init(void)
1221 {
1222 	struct proc_dir_entry *res;
1223 
1224 	res = proc_mkdir("cpu", NULL);
1225 	if (!res)
1226 		return -ENOMEM;
1227 	return 0;
1228 }
1229 fs_initcall(proc_cpu_init);
1230 #endif
1231 
1232 static const char *hwcap_str[] = {
1233 	"swp",
1234 	"half",
1235 	"thumb",
1236 	"26bit",
1237 	"fastmult",
1238 	"fpa",
1239 	"vfp",
1240 	"edsp",
1241 	"java",
1242 	"iwmmxt",
1243 	"crunch",
1244 	"thumbee",
1245 	"neon",
1246 	"vfpv3",
1247 	"vfpv3d16",
1248 	"tls",
1249 	"vfpv4",
1250 	"idiva",
1251 	"idivt",
1252 	"vfpd32",
1253 	"lpae",
1254 	"evtstrm",
1255 	"fphp",
1256 	"asimdhp",
1257 	"asimddp",
1258 	"asimdfhm",
1259 	"asimdbf16",
1260 	"i8mm",
1261 	NULL
1262 };
1263 
1264 static const char *hwcap2_str[] = {
1265 	"aes",
1266 	"pmull",
1267 	"sha1",
1268 	"sha2",
1269 	"crc32",
1270 	"sb",
1271 	"ssbs",
1272 	NULL
1273 };
1274 
1275 static int c_show(struct seq_file *m, void *v)
1276 {
1277 	int i, j;
1278 	u32 cpuid;
1279 
1280 	for_each_online_cpu(i) {
1281 		/*
1282 		 * glibc reads /proc/cpuinfo to determine the number of
1283 		 * online processors, looking for lines beginning with
1284 		 * "processor".  Give glibc what it expects.
1285 		 */
1286 		seq_printf(m, "processor\t: %d\n", i);
1287 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1288 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1289 			   cpu_name, cpuid & 15, elf_platform);
1290 
1291 #if defined(CONFIG_SMP)
1292 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1293 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1294 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1295 #else
1296 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1297 			   loops_per_jiffy / (500000/HZ),
1298 			   (loops_per_jiffy / (5000/HZ)) % 100);
1299 #endif
1300 		/* dump out the processor features */
1301 		seq_puts(m, "Features\t: ");
1302 
1303 		for (j = 0; hwcap_str[j]; j++)
1304 			if (elf_hwcap & (1 << j))
1305 				seq_printf(m, "%s ", hwcap_str[j]);
1306 
1307 		for (j = 0; hwcap2_str[j]; j++)
1308 			if (elf_hwcap2 & (1 << j))
1309 				seq_printf(m, "%s ", hwcap2_str[j]);
1310 
1311 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1312 		seq_printf(m, "CPU architecture: %s\n",
1313 			   proc_arch[cpu_architecture()]);
1314 
1315 		if ((cpuid & 0x0008f000) == 0x00000000) {
1316 			/* pre-ARM7 */
1317 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1318 		} else {
1319 			if ((cpuid & 0x0008f000) == 0x00007000) {
1320 				/* ARM7 */
1321 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1322 					   (cpuid >> 16) & 127);
1323 			} else {
1324 				/* post-ARM7 */
1325 				seq_printf(m, "CPU variant\t: 0x%x\n",
1326 					   (cpuid >> 20) & 15);
1327 			}
1328 			seq_printf(m, "CPU part\t: 0x%03x\n",
1329 				   (cpuid >> 4) & 0xfff);
1330 		}
1331 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1332 	}
1333 
1334 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1335 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1336 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1337 
1338 	return 0;
1339 }
1340 
1341 static void *c_start(struct seq_file *m, loff_t *pos)
1342 {
1343 	return *pos < 1 ? (void *)1 : NULL;
1344 }
1345 
1346 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1347 {
1348 	++*pos;
1349 	return NULL;
1350 }
1351 
1352 static void c_stop(struct seq_file *m, void *v)
1353 {
1354 }
1355 
1356 const struct seq_operations cpuinfo_op = {
1357 	.start	= c_start,
1358 	.next	= c_next,
1359 	.stop	= c_stop,
1360 	.show	= c_show
1361 };
1362