1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
4 */
5
6 #include <linux/delay.h>
7 #include <linux/dmi.h>
8 #include <linux/pci.h>
9 #include <linux/vgaarb.h>
10 #include <asm/amd_nb.h>
11 #include <asm/hpet.h>
12 #include <asm/pci_x86.h>
13
static void pci_fixup_i450nx(struct pci_dev *d)
15 {
16 /*
17 * i450NX -- Find and scan all secondary buses on all PXB's.
18 */
19 int pxb, reg;
20 u8 busno, suba, subb;
21
22 dev_warn(&d->dev, "Searching for i450NX host bridges\n");
23 reg = 0xd0;
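	/*
	 * As the loop below implies, each PXB appears to expose three
	 * consecutive byte registers starting at 0xd0: the number of its
	 * first (A) bus, the last bus number behind bus A, and the last
	 * bus number behind bus B, so bus B (if present) starts at suba + 1.
	 */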
24 for(pxb = 0; pxb < 2; pxb++) {
25 pci_read_config_byte(d, reg++, &busno);
26 pci_read_config_byte(d, reg++, &suba);
27 pci_read_config_byte(d, reg++, &subb);
28 dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
29 suba, subb);
30 if (busno)
31 pcibios_scan_root(busno); /* Bus A */
32 if (suba < subb)
33 pcibios_scan_root(suba+1); /* Bus B */
34 }
35 pcibios_last_bus = -1;
36 }
37 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
38
static void pci_fixup_i450gx(struct pci_dev *d)
40 {
41 /*
42 * i450GX and i450KX -- Find and scan all secondary buses.
43 * (called separately for each PCI bridge found)
44 */
45 u8 busno;
46 pci_read_config_byte(d, 0x4a, &busno);
	dev_info(&d->dev, "i450KX/GX host bridge; secondary bus %02x\n", busno);
48 pcibios_scan_root(busno);
49 pcibios_last_bus = -1;
50 }
51 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);
52
static void pci_fixup_umc_ide(struct pci_dev *d)
54 {
55 /*
56 * UM8886BF IDE controller sets region type bits incorrectly,
	 * therefore they look like memory even though they are really I/O.
58 */
59 int i;
60
61 dev_warn(&d->dev, "Fixing base address flags\n");
62 for(i = 0; i < 4; i++)
63 d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
64 }
65 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
66
static void pci_fixup_latency(struct pci_dev *d)
68 {
69 /*
70 * SiS 5597 and 5598 chipsets require latency timer set to
71 * at most 32 to avoid lockups.
72 */
73 dev_dbg(&d->dev, "Setting max latency to 32\n");
74 pcibios_max_latency = 32;
75 }
76 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
77 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
78
static void pci_fixup_piix4_acpi(struct pci_dev *d)
80 {
81 /*
82 * PIIX4 ACPI device: hardwired IRQ9
83 */
84 d->irq = 9;
85 }
86 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
87
88 /*
 * Addresses problems with the memory write queue timer in certain VIA
 * Northbridges.  This bugfix is per VIA's specifications,
91 * except for the KL133/KM133: clearing bit 5 on those Northbridges seems
92 * to trigger a bug in its integrated ProSavage video card, which
93 * causes screen corruption. We only clear bits 6 and 7 for that chipset,
94 * until VIA can provide us with definitive information on why screen
95 * corruption occurs, and what exactly those bits do.
96 *
97 * VIA 8363,8622,8361 Northbridges:
98 * - bits 5, 6, 7 at offset 0x55 need to be turned off
99 * VIA 8367 (KT266x) Northbridges:
100 * - bits 5, 6, 7 at offset 0x95 need to be turned off
101 * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges:
102 * - bits 6, 7 at offset 0x55 need to be turned off
103 */
104
105 #define VIA_8363_KL133_REVISION_ID 0x81
106 #define VIA_8363_KM133_REVISION_ID 0x84
107
static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
109 {
110 u8 v;
111 int where = 0x55;
112 int mask = 0x1f; /* clear bits 5, 6, 7 by default */
113
114 if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
		/*
		 * Fix PCI bus latency problems caused by a NB BIOS error:
		 * the buggy KT266x BIOS forces the NB latency timer to zero.
		 */
118 pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
119
120 where = 0x95; /* the memory write queue timer register is
121 different for the KT266x's: 0x95 not 0x55 */
122 } else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
123 (d->revision == VIA_8363_KL133_REVISION_ID ||
124 d->revision == VIA_8363_KM133_REVISION_ID)) {
125 mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5
126 causes screen corruption on the KL133/KM133 */
127 }
128
129 pci_read_config_byte(d, where, &v);
130 if (v & ~mask) {
		dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n",
132 d->device, d->revision, where, v, mask, v & mask);
133 v &= mask;
134 pci_write_config_byte(d, where, v);
135 }
136 }
137 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
138 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
139 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
140 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
141 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
142 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
143 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
144 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
145
146 /*
 * For some reason Intel decided that certain parts of their
148 * 815, 845 and some other chipsets must look like PCI-to-PCI bridges
149 * while they are obviously not. The 82801 family (AA, AB, BAM/CAM,
150 * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according
 * to Intel terminology.  These devices forward all addresses from the
 * system to the PCI bus no matter what their window settings are, so they
 * are "transparent" (or subtractive decoding) from the programmer's point
 * of view.
154 */
static void pci_fixup_transparent_bridge(struct pci_dev *dev)
156 {
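	/* The 82801 variants listed above all use 0x24xx device IDs */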
157 if ((dev->device & 0xff00) == 0x2400)
158 dev->transparent = 1;
159 }
160 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
161 PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
162
163 /*
164 * Fixup for C1 Halt Disconnect problem on nForce2 systems.
165 *
166 * From information provided by "Allen Martin" <AMartin@nvidia.com>:
167 *
168 * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
169 * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
170 * This allows the state-machine and timer to return to a proper state within
171 * 80 ns of the CONNECT and probe appearing together. Since the CPU will not
172 * issue another HALT within 80 ns of the initial HALT, the failure condition
173 * is avoided.
174 */
static void pci_fixup_nforce2(struct pci_dev *dev)
176 {
177 u32 val;
178
179 /*
180 * Chip Old value New value
181 * C17 0x1F0FFF01 0x1F01FF01
182 * C18D 0x9F0FFF01 0x9F01FF01
183 *
184 * Northbridge chip version may be determined by
185 * reading the PCI revision ID (0xC1 or greater is C18D).
186 */
187 pci_read_config_dword(dev, 0x6c, &val);
188
189 /*
190 * Apply fixup if needed, but don't touch disconnect state
191 */
192 if ((val & 0x00FF0000) != 0x00010000) {
193 dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
194 pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
195 }
196 }
197 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
198 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
199
200 /* Max PCI Express root ports */
201 #define MAX_PCIEROOT 6
202 static int quirk_aspm_offset[MAX_PCIEROOT << 3];
203
204 #define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))
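/*
 * For example, assuming the six root port device IDs are consecutive
 * starting at PCI_DEVICE_ID_INTEL_MCH_PA (which the range check in
 * pcie_rootport_aspm_quirk() relies on), a function at devfn 0x0a below
 * the PCI_DEVICE_ID_INTEL_MCH_PB root port (PA + 2) maps to index
 * (2 << 3) + (0x0a & 7) = 18, so quirk_aspm_offset[] has one slot per
 * possible function (8) of each of the MAX_PCIEROOT ports.
 */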
205
static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
207 {
208 return raw_pci_read(pci_domain_nr(bus), bus->number,
209 devfn, where, size, value);
210 }
211
212 /*
213 * Replace the original pci bus ops for write with a new one that will filter
 * the request to ensure ASPM cannot be enabled.
215 */
static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
217 {
218 u8 offset;
219
220 offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
221
222 if ((offset) && (where == offset))
223 value = value & ~PCI_EXP_LNKCTL_ASPMC;
224
225 return raw_pci_write(pci_domain_nr(bus), bus->number,
226 devfn, where, size, value);
227 }
228
229 static struct pci_ops quirk_pcie_aspm_ops = {
230 .read = quirk_pcie_aspm_read,
231 .write = quirk_pcie_aspm_write,
232 };
233
234 /*
 * Prevents PCI Express ASPM (Active State Power Management) from being enabled.
236 *
 * Save the register offset where the ASPM control bits are located
 * for each PCI Express device in the device list of
 * the root port in an array for fast indexing.  Replace the bus ops
240 * with the modified one.
241 */
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
243 {
244 int i;
245 struct pci_bus *pbus;
246 struct pci_dev *dev;
247
248 if ((pbus = pdev->subordinate) == NULL)
249 return;
250
251 /*
252 * Check if the DID of pdev matches one of the six root ports. This
253 * check is needed in the case this function is called directly by the
254 * hot-plug driver.
255 */
256 if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
257 (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
258 return;
259
260 if (list_empty(&pbus->devices)) {
261 /*
262 * If no device is attached to the root port at power-up or
263 * after hot-remove, the pbus->devices is empty and this code
264 * will set the offsets to zero and the bus ops to parent's bus
265 * ops, which is unmodified.
266 */
267 for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
268 quirk_aspm_offset[i] = 0;
269
270 pci_bus_set_ops(pbus, pbus->parent->ops);
271 } else {
272 /*
273 * If devices are attached to the root port at power-up or
274 * after hot-add, the code loops through the device list of
275 * each root port to save the register offsets and replace the
276 * bus ops.
277 */
278 list_for_each_entry(dev, &pbus->devices, bus_list)
279 /* There are 0 to 8 devices attached to this bus */
280 quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
281 dev->pcie_cap + PCI_EXP_LNKCTL;
282
283 pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
284 dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
285 }
286
287 }
288 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
289 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
290 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
291 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
292 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
293 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
294
295 /*
 * Fixup to mark the boot video device selected by the BIOS before it changes
297 *
298 * From information provided by "Jon Smirl" <jonsmirl@gmail.com>
299 *
300 * The standard boot ROM sequence for an x86 machine uses the BIOS
301 * to select an initial video card for boot display. This boot video
302 * card will have its BIOS copied to 0xC0000 in system RAM.
303 * IORESOURCE_ROM_SHADOW is used to associate the boot video
304 * card with this copy. On laptops this copy has to be used since
305 * the main ROM may be compressed or combined with another image.
306 * See pci_map_rom() for use of this flag. Before marking the device
307 * with IORESOURCE_ROM_SHADOW check if a vga_default_device is already set
308 * by either arch code or vga-arbitration; if so only apply the fixup to this
309 * already-determined primary video card.
310 */
311
static void pci_fixup_video(struct pci_dev *pdev)
313 {
314 struct pci_dev *bridge;
315 struct pci_bus *bus;
316 u16 config;
317 struct resource *res;
318
319 /* Is VGA routed to us? */
320 bus = pdev->bus;
321 while (bus) {
322 bridge = bus->self;
323
324 /*
325 * From information provided by
326 * "David Miller" <davem@davemloft.net>
327 * The bridge control register is valid for PCI header
328 * type BRIDGE, or CARDBUS. Host to PCI controllers use
329 * PCI header type NORMAL.
330 */
331 if (bridge && (pci_is_bridge(bridge))) {
332 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
333 &config);
334 if (!(config & PCI_BRIDGE_CTL_VGA))
335 return;
336 }
337 bus = bus->parent;
338 }
339 if (!vga_default_device() || pdev == vga_default_device()) {
340 pci_read_config_word(pdev, PCI_COMMAND, &config);
341 if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
342 res = &pdev->resource[PCI_ROM_RESOURCE];
343
344 pci_disable_rom(pdev);
345 if (res->parent)
346 release_resource(res);
347
348 res->start = 0xC0000;
349 res->end = res->start + 0x20000 - 1;
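			/* 128 KB legacy video ROM shadow at 0xC0000-0xDFFFF */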
350 res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
351 IORESOURCE_PCI_FIXED;
352 dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
353 res);
354 }
355 }
356 }
357 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
358 PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
359
360
361 static const struct dmi_system_id msi_k8t_dmi_table[] = {
362 {
363 .ident = "MSI-K8T-Neo2Fir",
364 .matches = {
365 DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
366 DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
367 },
368 },
369 {}
370 };
371
372 /*
 * The AMD Athlon 64 board MSI "K8T Neo2-FIR" disables the onboard sound
 * card if a PCI sound card is added.
 *
 * The BIOS only gives the options "DISABLED" and "AUTO".  This code sets
 * the corresponding register value to enable the sound card.
 *
 * The sound card is only enabled if the mainboard is identified via the
 * DMI tables and the sound card is detected to be off.
381 */
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
383 {
384 unsigned char val;
385 if (!dmi_check_system(msi_k8t_dmi_table))
386 return; /* only applies to MSI K8T Neo2-FIR */
387
388 pci_read_config_byte(dev, 0x50, &val);
389 if (val & 0x40) {
390 pci_write_config_byte(dev, 0x50, val & (~0x40));
391
392 /* verify the change for status output */
393 pci_read_config_byte(dev, 0x50, &val);
394 if (val & 0x40)
395 dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
396 "can't enable onboard soundcard!\n");
397 else
398 dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
399 "enabled onboard soundcard\n");
400 }
401 }
402 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
403 pci_fixup_msi_k8t_onboard_sound);
404 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
405 pci_fixup_msi_k8t_onboard_sound);
406
407 /*
408 * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
409 *
410 * We pretend to bring them out of full D3 state, and restore the proper
 * IRQ, PCI cache line size, and BARs; otherwise the device won't function
412 * properly. In some cases, the device will generate an interrupt on
413 * the wrong IRQ line, causing any devices sharing the line it's
414 * *supposed* to use to be disabled by the kernel's IRQ debug code.
415 */
416 static u16 toshiba_line_size;
417
418 static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
419 {
420 .ident = "Toshiba PS5 based laptop",
421 .matches = {
422 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
423 DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
424 },
425 },
426 {
427 .ident = "Toshiba PSM4 based laptop",
428 .matches = {
429 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
430 DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
431 },
432 },
433 {
434 .ident = "Toshiba A40 based laptop",
435 .matches = {
436 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
437 DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
438 },
439 },
440 { }
441 };
442
static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
444 {
445 if (!dmi_check_system(toshiba_ohci1394_dmi_table))
446 return; /* only applies to certain Toshibas (so far) */
447
448 dev->current_state = PCI_D3cold;
449 pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
450 }
451 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
452 pci_pre_fixup_toshiba_ohci1394);
453
static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
455 {
456 if (!dmi_check_system(toshiba_ohci1394_dmi_table))
457 return; /* only applies to certain Toshibas (so far) */
458
459 /* Restore config space on Toshiba laptops */
460 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
461 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
462 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
463 pci_resource_start(dev, 0));
464 pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
465 pci_resource_start(dev, 1));
466 }
467 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
468 pci_post_fixup_toshiba_ohci1394);
469
470
471 /*
472 * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
473 * configuration space.
474 */
static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
476 {
477 u8 r;
478 /* clear 'F4 Video Configuration Trap' bit */
479 pci_read_config_byte(dev, 0x42, &r);
480 r &= 0xfd;
481 pci_write_config_byte(dev, 0x42, r);
482 }
483 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
484 pci_early_fixup_cyrix_5530);
485 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
486 pci_early_fixup_cyrix_5530);
487
488 /*
489 * Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
490 * prevent update of the BAR0, which doesn't look like a normal BAR.
491 */
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
493 {
494 dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
495 }
496 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
497 pci_siemens_interrupt_controller);
498
499 /*
 * SB600: Disable BAR1 on device 14.0 to prevent HPET resources from
 * confusing the PCI engine:
502 */
static void sb600_disable_hpet_bar(struct pci_dev *dev)
504 {
505 u8 val;
506
507 /*
508 * The SB600 and SB700 both share the same device
509 * ID, but the PM register 0x55 does something different
510 * for the SB700, so make sure we are dealing with the
511 * SB600 before touching the bit:
512 */
513
514 pci_read_config_byte(dev, 0x08, &val);
515
516 if (val < 0x2F) {
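		/*
		 * I/O ports 0xCD6/0xCD7 appear to be the SBx00 PM index/data
		 * pair: write the register index to 0xCD6, then read or
		 * write its value through 0xCD7, as done below.
		 */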
517 outb(0x55, 0xCD6);
518 val = inb(0xCD7);
519
520 /* Set bit 7 in PM register 0x55 */
521 outb(0x55, 0xCD6);
522 outb(val | 0x80, 0xCD7);
523 }
524 }
525 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
526
527 #ifdef CONFIG_HPET_TIMER
static void sb600_hpet_quirk(struct pci_dev *dev)
529 {
530 struct resource *r = &dev->resource[1];
531
532 if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
533 r->flags |= IORESOURCE_PCI_FIXED;
534 dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
535 }
536 }
537 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
538 #endif
539
540 /*
 * Twinhead H12Y needs us to block out a region; otherwise we map devices
542 * there and any access kills the box.
543 *
544 * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
545 *
546 * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
547 */
static void twinhead_reserve_killing_zone(struct pci_dev *dev)
549 {
550 if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
551 pr_info("Reserving memory on Twinhead H12Y\n");
552 request_mem_region(0xFFB00000, 0x100000, "twinhead");
553 }
554 }
555 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
556
557 /*
558 * Device [8086:2fc0]
559 * Erratum HSE43
560 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
561 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
562 *
563 * Devices [8086:6f60,6fa0,6fc0]
564 * Erratum BDF2
565 * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
566 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
567 */
static void pci_invalid_bar(struct pci_dev *dev)
569 {
570 dev->non_compliant_bars = 1;
571 }
572 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
573 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
574 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
575 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
576 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
577 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
578 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
579 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
580
581 /*
582 * Device [1022:7808]
583 * 23. USB Wake on Connect/Disconnect with Low Speed Devices
584 * https://support.amd.com/TechDocs/46837.pdf
585 * Appendix A2
586 * https://support.amd.com/TechDocs/42413.pdf
587 */
static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
589 {
590 dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
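	/*
	 * dev->pme_support holds the PME_Support field of the PM capability
	 * already shifted down by PCI_PM_CAP_PME_SHIFT, which is why the
	 * D3hot/D3cold mask below is shifted the same way.
	 */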
591 dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
592 >> PCI_PM_CAP_PME_SHIFT);
593 }
594 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
595
596 /*
597 * Device [1022:7914]
598 * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
599 */
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
601 {
602 dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
603 dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
604 }
605 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
606
607 /*
608 * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
609 *
610 * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
611 * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
612 * for soft poweroff and suspend-to-RAM.
613 *
614 * As far as we know, this is related to the address space, not to the Root
615 * Port itself. Attaching the quirk to the Root Port is a convenience, but
616 * it could probably also be a standalone DMI quirk.
617 *
618 * https://bugzilla.kernel.org/show_bug.cgi?id=103211
619 */
static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
621 {
622 struct device *dev = &pdev->dev;
623 struct resource *res;
624
625 if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
626 !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
627 pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
628 return;
629
630 res = request_mem_region(0x7fa00000, 0x200000,
631 "MacBook Pro poweroff workaround");
632 if (res)
633 dev_info(dev, "claimed %s %pR\n", res->name, res);
634 else
635 dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
636 }
637 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
638
639 /*
640 * VMD-enabled root ports will change the source ID for all messages
641 * to the VMD device. Rather than doing device matching with the source
642 * ID, the AER driver should traverse the child device tree, reading
643 * AER registers to find the faulting device.
644 */
static void quirk_no_aersid(struct pci_dev *pdev)
646 {
647 /* VMD Domain */
648 if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
649 pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
650 }
651 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
652 PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
653
static void quirk_intel_th_dnv(struct pci_dev *dev)
655 {
656 struct resource *r = &dev->resource[4];
657
658 /*
659 * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
660 * appears to be 4 MB in reality.
661 */
662 if (r->end == r->start + 0x7ff) {
663 r->start = 0;
664 r->end = 0x3fffff;
665 r->flags |= IORESOURCE_UNSET;
666 }
667 }
668 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
669
670 #ifdef CONFIG_PHYS_ADDR_T_64BIT
671
672 #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
673 #define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
674 #define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
675 #define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)
676
677 #define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
678 #define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)
679
680 #define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
681 #define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
682 #define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
683 #define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
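/*
 * As the encoding in pci_amd_enable_64bit_bar() below shows, these
 * registers describe a 48-bit MMIO window with 64 KB granularity:
 * BASE/LIMIT bits 31:8 hold address bits 39:16, and the HIGH register
 * holds address bits 47:40 of the base (bits 7:0) and of the limit
 * (bits 23:16).  For the 0xbd00000000-0xfcffffffff window programmed
 * below, that works out to base 0xbd000003 (RE|WE set), limit 0xfd000000
 * and high 0.
 */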
684
685 /*
686 * The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
687 * configuring host bridge windows using the _PRS and _SRS methods.
688 *
689 * But this is rarely implemented, so we manually enable a large 64bit BAR for
 * PCIe devices on AMD Family 15h (Models 00h-1fh, 30h-3fh, 60h-7fh) Processors
691 * here.
692 */
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
694 {
695 static const char *name = "PCI Bus 0000:00";
696 struct resource *res, *conflict;
697 u32 base, limit, high;
698 struct pci_dev *other;
699 unsigned i;
700
701 if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
702 return;
703
704 /* Check that we are the only device of that type */
705 other = pci_get_device(dev->vendor, dev->device, NULL);
706 if (other != dev ||
707 (other = pci_get_device(dev->vendor, dev->device, other))) {
708 /* This is a multi-socket system, don't touch it for now */
709 pci_dev_put(other);
710 return;
711 }
712
713 for (i = 0; i < 8; i++) {
714 pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
715 pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
716
717 /* Is this slot free? */
718 if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
719 AMD_141b_MMIO_BASE_WE_MASK)))
720 break;
721
722 base >>= 8;
723 base |= high << 24;
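		/*
		 * base now holds bits 47:16 of the window start address, so
		 * any value above 0x10000 means a window above 4 GB.
		 */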
724
725 /* Abort if a slot already configures a 64bit BAR. */
726 if (base > 0x10000)
727 return;
728 }
729 if (i == 8)
730 return;
731
732 res = kzalloc(sizeof(*res), GFP_KERNEL);
733 if (!res)
734 return;
735
736 /*
737 * Allocate a 256GB window directly below the 0xfd00000000 hardware
738 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
739 */
740 res->name = name;
741 res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
742 IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
743 res->start = 0xbd00000000ull;
744 res->end = 0xfd00000000ull - 1;
745
746 conflict = request_resource_conflict(&iomem_resource, res);
747 if (conflict) {
748 kfree(res);
749 if (conflict->name != name)
750 return;
751
752 /* We are resuming from suspend; just reenable the window */
753 res = conflict;
754 } else {
755 dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
756 res);
757 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
758 pci_bus_add_resource(dev->bus, res, 0);
759 }
760
761 base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
762 AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
763 limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
764 high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
765 ((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
766 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
767
768 pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
769 pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
770 pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
771 }
772 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
773 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
774 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
775 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
776 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
777 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
778 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
779 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
780 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
781 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
782
783 #define RS690_LOWER_TOP_OF_DRAM2 0x30
784 #define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
785 #define RS690_UPPER_TOP_OF_DRAM2 0x31
786 #define RS690_HTIU_NB_INDEX 0xA8
787 #define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
788 #define RS690_HTIU_NB_DATA 0xAC
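/*
 * The HTIU registers are reached through an index/data pair in config
 * space: write the register index to RS690_HTIU_NB_INDEX (setting
 * RS690_HTIU_NB_INDEX_WR_ENABLE for writes), then access the value
 * through RS690_HTIU_NB_DATA, as the fixup below does.
 */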
789
790 /*
791 * Some BIOS implementations support RAM above 4GB, but do not configure the
792 * PCI host to respond to bus master accesses for these addresses. These
793 * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
794 * works as expected for addresses below 4GB.
795 *
796 * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
797 * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
798 */
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
800 {
801 u32 val = 0;
802 phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
803
804 if (top_of_dram <= (1ULL << 32))
805 return;
806
807 pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
808 RS690_LOWER_TOP_OF_DRAM2);
809 pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
810
811 if (val)
812 return;
813
814 pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
815
816 pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
817 RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
818 pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
819
820 pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
821 RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
822 pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
823 top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
824 }
825 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
826
827 #endif
828
829 #ifdef CONFIG_AMD_NB
830
831 #define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
832 #define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L
833
static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
835 {
836 u32 data;
837
838 if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
839 data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
840 if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
841 pci_err(dev, "Failed to write data 0x%x\n", data);
842 } else {
843 pci_err(dev, "Failed to read data\n");
844 }
845 }
846 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
847 #endif
848
849 /*
850 * When returning from D3cold to D0, firmware on some Google Coral and Reef
851 * family Chromebooks with Intel Apollo Lake SoC clobbers the headers of
852 * both the L1 PM Substates capability and the previous capability for the
853 * "Celeron N3350/Pentium N4200/Atom E3900 Series PCI Express Port B #1".
854 *
855 * Save those values at enumeration-time and restore them at resume.
856 */
857
858 static u16 prev_cap, l1ss_cap;
859 static u32 prev_header, l1ss_header;
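/*
 * PCI Express extended capabilities start at offset PCI_CFG_SPACE_SIZE
 * (0x100); each 32-bit header encodes the capability ID in bits 15:0 and
 * the offset of the next capability in bits 31:20, which is what the walk
 * below relies on to remember both the L1SS header and the header that
 * links to it.
 */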
860
static void chromeos_save_apl_pci_l1ss_capability(struct pci_dev *dev)
862 {
863 int pos = PCI_CFG_SPACE_SIZE, prev = 0;
864 u32 header, pheader = 0;
865
866 while (pos) {
867 pci_read_config_dword(dev, pos, &header);
868 if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_L1SS) {
869 prev_cap = prev;
870 prev_header = pheader;
871 l1ss_cap = pos;
872 l1ss_header = header;
873 return;
874 }
875
876 prev = pos;
877 pheader = header;
878 pos = PCI_EXT_CAP_NEXT(header);
879 }
880 }
881
static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
883 {
884 u32 header;
885
886 if (!prev_cap || !prev_header || !l1ss_cap || !l1ss_header)
887 return;
888
889 /* Fixup the header of L1SS Capability if missing */
890 pci_read_config_dword(dev, l1ss_cap, &header);
891 if (header != l1ss_header) {
892 pci_write_config_dword(dev, l1ss_cap, l1ss_header);
893 pci_info(dev, "restore L1SS Capability header (was %#010x now %#010x)\n",
894 header, l1ss_header);
895 }
896
897 /* Fixup the link to L1SS Capability if missing */
898 pci_read_config_dword(dev, prev_cap, &header);
899 if (header != prev_header) {
900 pci_write_config_dword(dev, prev_cap, prev_header);
901 pci_info(dev, "restore previous Capability header (was %#010x now %#010x)\n",
902 header, prev_header);
903 }
904 }
905 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
906 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
907