1 /*
2  * Copyright (c) 2019 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/kernel.h>
8 #include <zephyr/sys/device_mmio.h>
9 #include <zephyr/drivers/pcie/pcie.h>
10 
11 #ifdef CONFIG_ACPI
12 #include <zephyr/acpi/acpi.h>
13 #endif
14 
15 #ifdef CONFIG_PCIE_MSI
16 #include <kernel_arch_func.h>
17 #include <zephyr/device.h>
18 #include <zephyr/drivers/pcie/msi.h>
19 #include <zephyr/drivers/interrupt_controller/sysapic.h>
20 #include <zephyr/arch/x86/cpuid.h>
21 #endif
22 
23 /* PCI Express Extended Configuration Mechanism (MMIO) */
24 #ifdef CONFIG_PCIE_MMIO_CFG
25 
#define MAX_PCI_BUS_SEGMENTS 4

/* Per-segment ECAM mapping state: which bus range a segment decodes and
 * where its configuration space is mapped in virtual memory.
 */
static struct {
	uint32_t start_bus;	/* first bus number decoded by this segment */
	uint32_t n_buses;	/* number of buses decoded by this segment */
	uint8_t *mmio;		/* virtual base of the mapped ECAM window */
} bus_segs[MAX_PCI_BUS_SEGMENTS];

/* True once the MCFG table has been parsed and ECAM regions mapped */
static bool do_pcie_mmio_cfg;
35 
/*
 * Parse the ACPI MCFG table and map each PCI segment's ECAM region so
 * configuration space can be accessed via MMIO. If no MCFG table is
 * found (or ACPI support is disabled), do_pcie_mmio_cfg stays false and
 * accesses fall back to the legacy I/O port mechanism.
 */
static void pcie_mm_init(void)
{
#ifdef CONFIG_ACPI
	struct acpi_mcfg *m = acpi_table_get("MCFG", 0);

	if (m != NULL) {
		/* Number of segment entries following the fixed MCFG header */
		int n = (m->header.Length - sizeof(*m)) / sizeof(m->pci_segs[0]);

		/* Segments beyond MAX_PCI_BUS_SEGMENTS are silently ignored */
		for (int i = 0; i < n && i < MAX_PCI_BUS_SEGMENTS; i++) {
			size_t size;
			uintptr_t phys_addr;

			bus_segs[i].start_bus = m->pci_segs[i].StartBusNumber;
			bus_segs[i].n_buses =
				1 + m->pci_segs[i].EndBusNumber - m->pci_segs[i].StartBusNumber;

			phys_addr = m->pci_segs[i].Address;
			/* 32 devices & 8 functions per bus, 4k per device */
			size = bus_segs[i].n_buses * (32 * 8 * 4096);

			device_map((mm_reg_t *)&bus_segs[i].mmio, phys_addr, size,
				   K_MEM_CACHE_NONE);
		}

		do_pcie_mmio_cfg = true;
	}
#endif
}
64 
/*
 * Access a 32-bit configuration register through the ECAM (MMIO)
 * mechanism: find the bus segment decoding the bus of @bdf, then read
 * or write the dword at the standard ECAM offset (4 KiB of config
 * space per function, so BDF << 4 dwords from the segment base).
 */
static inline void pcie_mm_conf(pcie_bdf_t bdf, unsigned int reg,
				bool write, uint32_t *data)
{
	for (int seg = 0; seg < ARRAY_SIZE(bus_segs); seg++) {
		int bus_off = PCIE_BDF_TO_BUS(bdf) - bus_segs[seg].start_bus;

		if (bus_off < 0 || bus_off >= bus_segs[seg].n_buses) {
			continue;
		}

		/* Rebase the bus number to this segment's window */
		bdf = PCIE_BDF(bus_off,
			       PCIE_BDF_TO_DEV(bdf),
			       PCIE_BDF_TO_FUNC(bdf));

		volatile uint32_t *regs =
			(void *)&bus_segs[seg].mmio[bdf << 4];

		if (write) {
			regs[reg] = *data;
		} else {
			*data = regs[reg];
		}
	}
}
87 
88 #endif /* CONFIG_PCIE_MMIO_CFG */
89 
90 /* Traditional Configuration Mechanism */
91 
92 #define PCIE_X86_CAP	0xCF8U	/* Configuration Address Port */
93 #define PCIE_X86_CAP_BDF_MASK	0x00FFFF00U  /* b/d/f bits */
94 #define PCIE_X86_CAP_EN		0x80000000U  /* enable bit */
95 #define PCIE_X86_CAP_WORD_MASK	0x3FU  /*  6-bit word index .. */
96 #define PCIE_X86_CAP_WORD_SHIFT	2U  /* .. is in CAP[7:2] */
97 
98 #define PCIE_X86_CDP	0xCFCU	/* Configuration Data Port */
99 
100 /*
101  * Helper function for exported configuration functions. Configuration access
102  * is not atomic, so spinlock to keep drivers from clobbering each other.
103  */
/*
 * Helper for the exported configuration functions: access a 32-bit
 * configuration register through the legacy CF8h/CFCh port pair.
 * The address/data ports are a single shared resource, so a spinlock
 * serializes accesses; the address port is cleared again afterwards.
 */
static inline void pcie_io_conf(pcie_bdf_t bdf, unsigned int reg,
				bool write, uint32_t *data)
{
	static struct k_spinlock lock;
	k_spinlock_key_t key;
	uint32_t addr;

	/* CONFIG_ADDRESS value: enable bit | bus/dev/func | dword index */
	addr = (bdf & PCIE_X86_CAP_BDF_MASK) | PCIE_X86_CAP_EN |
	       ((reg & PCIE_X86_CAP_WORD_MASK) << PCIE_X86_CAP_WORD_SHIFT);

	key = k_spin_lock(&lock);

	sys_out32(addr, PCIE_X86_CAP);

	if (write) {
		sys_out32(*data, PCIE_X86_CDP);
	} else {
		*data = sys_in32(PCIE_X86_CDP);
	}

	/* Leave the address port disabled between accesses */
	sys_out32(0U, PCIE_X86_CAP);

	k_spin_unlock(&lock, key);
}
126 
/*
 * Dispatch a configuration-space access to the ECAM (MMIO) mechanism
 * when it is configured and an MCFG table was found, otherwise to the
 * legacy I/O port mechanism. ECAM mapping is set up lazily on the
 * first access.
 */
static inline void pcie_conf(pcie_bdf_t bdf, unsigned int reg,
			     bool write, uint32_t *data)

{
#ifdef CONFIG_PCIE_MMIO_CFG
	if (bus_segs[0].mmio == NULL) {
		pcie_mm_init();
	}

	if (do_pcie_mmio_cfg) {
		pcie_mm_conf(bdf, reg, write, data);
		return;
	}
#endif

	pcie_io_conf(bdf, reg, write, data);
}
144 
145 /* these functions are explained in include/drivers/pcie/pcie.h */
146 
/* Read a 32-bit word from the endpoint's configuration space; see
 * include/drivers/pcie/pcie.h for the contract.
 */
uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
{
	uint32_t value = 0U;

	pcie_conf(bdf, reg, false, &value);

	return value;
}
154 
/* Write a 32-bit word to the endpoint's configuration space; see
 * include/drivers/pcie/pcie.h for the contract.
 */
void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data)
{
	uint32_t value = data;

	pcie_conf(bdf, reg, true, &value);
}
159 
160 #ifdef CONFIG_PCIE_MSI
161 
162 #ifdef CONFIG_INTEL_VTD_ICTL
163 
164 #include <zephyr/drivers/interrupt_controller/intel_vtd.h>
165 
166 static const struct device *const vtd = DEVICE_DT_GET_ONE(intel_vt_d);
167 
168 #endif /* CONFIG_INTEL_VTD_ICTL */
169 
170 /* these functions are explained in include/drivers/pcie/msi.h */
171 
172 #define MSI_MAP_DESTINATION_ID_SHIFT 12
173 #define MSI_RH BIT(3)
174 
/*
 * Build the MSI message address for the given vectors; see
 * include/drivers/pcie/msi.h. With VT-d interrupt remapping enabled
 * and vectors supplied, the address comes from the remapping unit;
 * otherwise a compatibility-format address targeting the current
 * physical CPU is constructed.
 */
uint32_t pcie_msi_map(unsigned int irq,
		      msi_vector_t *vector,
		      uint8_t n_vector)
{
	uint32_t map_addr;

	ARG_UNUSED(irq);

#if defined(CONFIG_INTEL_VTD_ICTL)
	if ((vector != NULL) && (n_vector > 0)) {
		return vtd_remap_msi(vtd, vector, n_vector);
	}
#endif

	/* Directing to current physical CPU (may not be BSP)
	 * Destination ID - RH 1 - DM 0
	 */
	map_addr = 0xFEE00000U | MSI_RH;
	map_addr |= z_x86_cpuid_get_current_physical_apic_id() <<
		MSI_MAP_DESTINATION_ID_SHIFT;

	return map_addr;
}
197 
/*
 * Build the MSI message data register value; see
 * include/drivers/pcie/msi.h.
 */
uint16_t pcie_msi_mdr(unsigned int irq,
		      msi_vector_t *vector)
{
	/* Base message data; the interrupt vector number is ORed in */
	const uint16_t base = 0x4000U;

	if (vector == NULL) {
		return base | Z_IRQ_TO_INTERRUPT_VECTOR(irq);
	}

	if (IS_ENABLED(CONFIG_INTEL_VTD_ICTL)) {
		/* With remapping, the data is held in the IRTE instead */
		return 0;
	}

#if defined(CONFIG_PCIE_MSI_X)
	if (vector->msix) {
		return base | vector->arch.vector;
	}
#endif

	return base | Z_IRQ_TO_INTERRUPT_VECTOR(irq);
}
215 
216 #if defined(CONFIG_INTEL_VTD_ICTL) || defined(CONFIG_PCIE_MSI_X)
217 
/*
 * Allocate IRQs and IDT vectors (and VT-d remapping entries when
 * enabled) for up to @n_vector MSI vectors at the given priority; see
 * include/drivers/pcie/msi.h.
 *
 * Returns the number of vectors allocated: @n_vector on success, 0 on
 * any failure. NOTE(review): on mid-loop failure, previously allocated
 * IRQs/vectors/IRTEs are not released — pre-existing behavior kept.
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector)
{
	int prev_vector = -1;
	int i, irq, vector;

	if (vectors == NULL || n_vector == 0) {
		return 0;
	}


#ifdef CONFIG_INTEL_VTD_ICTL
	{
		int irte;

		if (!device_is_ready(vtd)) {
			return 0;
		}

		irte = vtd_allocate_entries(vtd, n_vector);
		if (irte < 0) {
			return 0;
		}

		for (i = 0; i < n_vector; i++, irte++) {
			vectors[i].arch.irte = irte;
			vectors[i].arch.remap = true;
		}
	}
#endif /* CONFIG_INTEL_VTD_ICTL */

	for (i = 0; i < n_vector; i++) {
		if (n_vector == 1) {
			/* This path is taken by PCIE device with fixed
			 * or single MSI: IRQ has been already allocated
			 * and/or set on the PCIe bus. Thus we only require
			 * to get it.
			 */
			irq = pcie_get_irq(vectors->bdf);
		} else {
			irq = arch_irq_allocate();
		}

		if ((irq == PCIE_CONF_INTR_IRQ_NONE) || (irq == -1)) {
			/* Fix: this function returns uint8_t, so the former
			 * "return -1" was seen by callers as 255 allocated
			 * vectors. Report failure as 0, consistent with all
			 * other error paths in this function.
			 */
			return 0;
		}

		vector = z_x86_allocate_vector(priority, prev_vector);
		if (vector < 0) {
			return 0;
		}

		vectors[i].arch.irq = irq;
		vectors[i].arch.vector = vector;

#ifdef CONFIG_INTEL_VTD_ICTL
		vtd_set_irte_vector(vtd, vectors[i].arch.irte,
				    vectors[i].arch.vector);
		vtd_set_irte_irq(vtd, vectors[i].arch.irte,
				 vectors[i].arch.irq);
		vtd_set_irte_msi(vtd, vectors[i].arch.irte, true);
#endif
		prev_vector = vectors[i].arch.vector;
	}

	return n_vector;
}
286 
/*
 * Connect @routine as the handler for the given MSI vector; see
 * include/drivers/pcie/msi.h. When the vector uses VT-d remapping,
 * the remapping entry is programmed with the device's source-id
 * (bus/device/function) before the handler is connected.
 *
 * Returns false only when the VT-d device is required but not ready,
 * true otherwise.
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags)
{
#ifdef CONFIG_INTEL_VTD_ICTL
	if (vector->arch.remap) {
		union acpi_dmar_id id;

		if (!device_is_ready(vtd)) {
			return false;
		}

		/* Source-id of the requesting device, for VT-d validation */
		id.bits.bus = PCIE_BDF_TO_BUS(vector->bdf);
		id.bits.device = PCIE_BDF_TO_DEV(vector->bdf);
		id.bits.function = PCIE_BDF_TO_FUNC(vector->bdf);

		vtd_remap(vtd, vector->arch.irte, vector->arch.vector,
			  flags, id.raw);
	}
#endif /* CONFIG_INTEL_VTD_ICTL */

	z_x86_irq_connect_on_vector(vector->arch.irq, vector->arch.vector,
				    routine, parameter);

	return true;
}
314 
315 #endif /* CONFIG_INTEL_VTD_ICTL || CONFIG_PCIE_MSI_X */
316 #endif /* CONFIG_PCIE_MSI */
317