1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/dma-map-ops.h>
3 #include <linux/dma-direct.h>
4 #include <linux/iommu.h>
5 #include <linux/dmar.h>
6 #include <linux/export.h>
7 #include <linux/memblock.h>
8 #include <linux/gfp.h>
9 #include <linux/pci.h>
10
11 #include <asm/proto.h>
12 #include <asm/dma.h>
13 #include <asm/iommu.h>
14 #include <asm/gart.h>
15 #include <asm/x86_init.h>
16 #include <asm/iommu_table.h>
17
/* Set by "iommu=usedac"; suppresses the VIA DAC-disable quirk below. */
static bool disable_dac_quirk __read_mostly;

/* Active DMA mapping operations for the platform; NULL means dma-direct. */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Debug builds default to strict behavior: panic on overflow, force IOMMU. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Allow merging of scatter-gather entries; toggled by "iommu=" options. */
int iommu_merge __read_mostly = 0;

/* Set by "iommu=off" to disable any hardware IOMMU use. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Linker-generated table of IOMMU detect/init hooks (see iommu_table.h). */
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
38
pci_iommu_alloc(void)39 void __init pci_iommu_alloc(void)
40 {
41 struct iommu_table_entry *p;
42
43 sort_iommu_table(__iommu_table, __iommu_table_end);
44 check_iommu_entries(__iommu_table, __iommu_table_end);
45
46 for (p = __iommu_table; p < __iommu_table_end; p++) {
47 if (p && p->detect && p->detect() > 0) {
48 p->flags |= IOMMU_DETECTED;
49 if (p->early_init)
50 p->early_init();
51 if (p->flags & IOMMU_FINISH_IF_DETECTED)
52 break;
53 }
54 }
55 }
56
57 /*
58 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
59 * parameter documentation.
60 */
/*
 * Parse the comma-separated "iommu=" early boot parameter.
 *
 * NOTE: matching is plain strncmp() prefix matching, so a token can
 * satisfy several tests (e.g. "forcesac" also matches the "force"
 * test before its own).  The remainder of the string is also handed
 * to gart_parse_options() for GART-specific options.
 *
 * Returns 0 on success, -EINVAL if no argument string was supplied,
 * or 1 immediately for "usedac" (which stops further parsing).
 */
static __init int iommu_setup(char *p)
{
	/* Presence of any iommu= option enables merging by default. */
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* The DAC/SAC tuning options are obsolete; warn and ignore. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		/* Advance past this token and the trailing comma, if any. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
121
pci_iommu_init(void)122 static int __init pci_iommu_init(void)
123 {
124 struct iommu_table_entry *p;
125
126 x86_init.iommu.iommu_init();
127
128 for (p = __iommu_table; p < __iommu_table_end; p++) {
129 if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
130 p->late_init();
131 }
132
133 return 0;
134 }
135 /* Must execute after PCI subsystem */
136 rootfs_initcall(pci_iommu_init);
137
138 #ifdef CONFIG_PCI
139 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
140
via_no_dac_cb(struct pci_dev * pdev,void * data)141 static int via_no_dac_cb(struct pci_dev *pdev, void *data)
142 {
143 pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
144 return 0;
145 }
146
via_no_dac(struct pci_dev * dev)147 static void via_no_dac(struct pci_dev *dev)
148 {
149 if (!disable_dac_quirk) {
150 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
151 pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
152 }
153 }
154 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
155 PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
156 #endif
157