// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

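/*
 * Default translation of an MP table CPU entry to its APIC ID; platforms
 * can override this via x86_init.mpparse.mpc_apic_id.
 */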
int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

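/*
 * Register one CPU entry from the MP table with the APIC code, noting
 * which entry is the bootstrap processor.
 */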
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

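/* Advance the entry pointer and the running byte count past one entry. */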
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 *      Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

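/*
 * Read the ISA edge/level control registers (ports 0x4d0/0x4d1) to find
 * out whether a legacy IRQ is level triggered.
 */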
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

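/*
 * Build a complete default MP configuration (two CPUs, buses, IO-APIC and
 * local interrupt entries) for one of the MPS-defined standard
 * configuration types.
 */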
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

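/* Temporarily map the MP configuration table header to read its length. */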
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

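/*
 * Map and parse the MP configuration table pointed to by the floating
 * pointer structure. If the table carries no interrupt entries, fall back
 * to a default ISA IRQ layout.
 */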
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading: the MPS table only lists the
	 * APIC ID of thread 0, so prefer ACPI when it provides both the
	 * local APIC and IO-APIC data.
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

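/*
 * Scan a physical memory range, in 16-byte steps, for the MP floating
 * pointer signature; on a match, record its location and reserve the
 * structures it points to.
 */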
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

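/*
 * Look up a non-legacy (level triggered, active low) interrupt source in
 * mp_irqs[]. Returns the index of a fresh match, 0 for legacy entries,
 * -2 if the slot was already claimed and -1 if no match was found.
 */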
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

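/*
 * Refresh an MP table interrupt source entry from the matching mp_irqs[]
 * copy, or remember it as a spare slot for later reuse.
 */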
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * Not found (-1) or duplicated (-2) entries are invalid;
		 * remember this slot so it can be reused later.
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

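/*
 * Walk the MP configuration table, refresh its interrupt source entries
 * from mp_irqs[] and append any entries that do not have a slot yet,
 * reusing spare slots first. The table checksum is recomputed at the end.
 */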
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

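/*
 * Late initcall: when "update_mptable" is given, rewrite the firmware MP
 * table (in place, or in the area reserved via "alloc_mptable") so that
 * its interrupt entries match the kernel's current IRQ routing.
 */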
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * Only replace entries with mp_INT and
	 * MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW that are
	 * already in mp_irqs, stored by ... and mp_config_acpi_gsi;
	 * pci=routeirq may be needed for full coverage.
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);