/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

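/*
 * Decide whether the level 1 instruction cache can alias.  PIPT
 * I-caches never alias; otherwise the answer depends on the register
 * format selected by 'arch': on ARMv7 the I-cache way size derived
 * from CCSIDR is compared against PAGE_SIZE, on ARMv6 the aliasing
 * bit in the cache type register is used.
 */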
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

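/*
 * Work out what type of data and instruction caches this CPU has and
 * record the result in the global 'cacheid' flags (VIVT, VIPT
 * aliasing/non-aliasing, PIPT, ASID-tagged), which the cache
 * maintenance code keys off.
 */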
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 *  already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

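/*
 * If the CPU advertises hardware integer divide, patch the library
 * __aeabi_uidiv/__aeabi_idiv helpers in place with a real udiv/sdiv
 * followed by a return, so callers no longer go through the software
 * division routine.
 */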
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

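/*
 * On ARMv7 and later, probe the CPUID feature registers (ISAR0, MMFR0,
 * ISAR5) for optional instructions - integer divide, LPAE ldrd/strd,
 * and the v8 crypto/CRC32 extensions - and set the corresponding
 * elf_hwcap/elf_hwcap2 bits.
 */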
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

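/*
 * Remove hwcap bits the hardware cannot reliably honour: TLS on early
 * ARM1136 revisions, and SWP on CPUs where exclusive load/store makes
 * advertising it unsafe.
 */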
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

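/*
 * Map the booting CPU to logical CPU 0 using the affinity level 0
 * field of its MPIDR, and shuffle the remaining logical IDs so every
 * possible CPU still has a unique slot.
 */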
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

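/*
 * Identify the boot CPU: look it up in the table generated from
 * arch/arm/mm/proc-*.S, install the matching processor/TLB/user/cache
 * function pointers, set the initial hwcaps and cache policy, then
 * initialise this CPU's exception stacks via cpu_init().
 */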
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

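/*
 * Register a block of RAM with memblock.  The region is page aligned
 * (start rounded up, size rounded down), clamped to the 32-bit
 * physical address space when the kernel has no 64-bit phys_addr_t,
 * and anything below PHYS_OFFSET is discarded.
 */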
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

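/*
 * Register the standard resource tree: a "System RAM" resource (plus
 * a boot alias where one exists) for every memblock region, the kernel
 * code and data ranges nested inside them, video RAM if the machine
 * describes it, and the legacy lp0/lp1/lp2 I/O port ranges on request.
 */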
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

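/*
 * Late machine init: run the board's init_late hook and pick up the
 * system serial number, preferring the device tree "serial-number"
 * property over the legacy high/low serial words.
 */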
static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

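/* Amount of memory (in bytes) spanned by the low memory pfn range. */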
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

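/*
 * Report whether the CPUs entered the kernel in HYP mode, and warn if
 * the boot and secondary CPUs disagree, since that usually points at a
 * bootloader or firmware problem.
 */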
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

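/*
 * setup_arch - architecture-specific boot-time initialisation.
 *
 * Identify the CPU and machine (FDT or ATAGs), set up the memory map
 * via memblock and paging_init(), register the standard resources,
 * and wire up SMP operations, the crashkernel reservation and the
 * machine's early init hook before generic start_kernel() code
 * continues.
 */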
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

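/*
 * /proc/cpuinfo: print one record per online CPU (model, BogoMIPS,
 * hwcap feature strings and decoded CPUID fields), followed by the
 * board-level Hardware/Revision/Serial lines.
 */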
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};