/*
 * Copyright (c) 2021 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pcie_core, LOG_LEVEL_INF);

#include <zephyr/kernel.h>
#include <zephyr/drivers/pcie/pcie.h>
#include <zephyr/drivers/pcie/controller.h>

#ifdef CONFIG_PCIE_MSI
#include <zephyr/drivers/pcie/msi.h>
#endif

/* arch-agnostic PCIe API implementation */

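/*
 * Read a 32-bit word from the configuration space of the endpoint at @bdf,
 * going through the controller selected by the zephyr,pcie-controller
 * chosen node. Returns 0xffffffff if the controller device is unavailable.
 */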
uint32_t pcie_conf_read(pcie_bdf_t bdf, unsigned int reg)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return 0xffffffff;
	}

	return pcie_ctrl_conf_read(dev, bdf, reg);
}

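/*
 * Write a 32-bit word to the configuration space of the endpoint at @bdf,
 * going through the controller selected by the zephyr,pcie-controller
 * chosen node.
 */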
void pcie_conf_write(pcie_bdf_t bdf, unsigned int reg, uint32_t data)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return;
	}

	pcie_ctrl_conf_write(dev, bdf, reg, data);
}

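/*
 * Generic ECAM-style configuration space read. Zephyr encodes a BDF with
 * the bus in bits 16-23, device in bits 11-15 and function in bits 8-10,
 * so (bdf << 4) yields the standard ECAM byte offset of the function's
 * 4 KiB configuration region within @cfg_addr.
 */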
uint32_t pcie_generic_ctrl_conf_read(mm_reg_t cfg_addr, pcie_bdf_t bdf, unsigned int reg)
{
	volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

	if (!cfg_addr) {
		return 0xffffffff;
	}

	return bdf_cfg_mem[reg];
}

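/* Generic ECAM-style configuration space write, mirroring the read above */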
void pcie_generic_ctrl_conf_write(mm_reg_t cfg_addr, pcie_bdf_t bdf,
				  unsigned int reg, uint32_t data)
{
	volatile uint32_t *bdf_cfg_mem = (volatile uint32_t *)((uintptr_t)cfg_addr + (bdf << 4));

	if (!cfg_addr) {
		return;
	}

	bdf_cfg_mem[reg] = data;
}

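/*
 * Size and program the first @nbars BARs of the endpoint at @bdf: each
 * implemented BAR is sized with the all-ones probe, backed by a region
 * allocated from the controller, and then programmed with the allocated
 * bus address.
 */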
static void pcie_generic_ctrl_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf,
					     unsigned int nbars)
{
	unsigned int bar, reg, data;
	uintptr_t scratch, bar_bus_addr;
	size_t size, bar_size;

	for (bar = 0, reg = PCIE_CONF_BAR0; bar < nbars && reg <= PCIE_CONF_BAR5; reg++, bar++) {
		bool found_mem64 = false;
		bool found_mem = false;

		data = scratch = pcie_conf_read(bdf, reg);

		if (PCIE_CONF_BAR_INVAL_FLAGS(data)) {
			continue;
		}

		if (PCIE_CONF_BAR_MEM(data)) {
			found_mem = true;
			if (PCIE_CONF_BAR_64(data)) {
				found_mem64 = true;
				scratch |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32;
				if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL64) {
					continue;
				}
			} else {
				if (PCIE_CONF_BAR_ADDR(scratch) == PCIE_CONF_BAR_INVAL) {
					continue;
				}
			}
		}

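		/* Size the BAR: write all ones, read back the size mask,
		 * then restore the original value.
		 */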
		pcie_conf_write(bdf, reg, 0xFFFFFFFF);
		size = pcie_conf_read(bdf, reg);
		pcie_conf_write(bdf, reg, scratch & 0xFFFFFFFF);

		if (found_mem64) {
			pcie_conf_write(bdf, reg + 1, 0xFFFFFFFF);
			size |= ((uint64_t)pcie_conf_read(bdf, reg + 1)) << 32;
			pcie_conf_write(bdf, reg + 1, scratch >> 32);
		}

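		/* An all-zero size mask means the BAR is not implemented;
		 * a 64-bit BAR still consumes the next register pair.
		 */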
		if (!PCIE_CONF_BAR_ADDR(size)) {
			if (found_mem64) {
				reg++;
			}
			continue;
		}

		if (found_mem) {
			if (found_mem64) {
				bar_size = (uint64_t)~PCIE_CONF_BAR_ADDR(size) + 1;
			} else {
				bar_size = (uint32_t)~PCIE_CONF_BAR_ADDR(size) + 1;
			}
		} else {
			bar_size = (uint32_t)~PCIE_CONF_BAR_IO_ADDR(size) + 1;
		}

		if (pcie_ctrl_region_allocate(ctrl_dev, bdf, found_mem,
					      found_mem64, bar_size, &bar_bus_addr)) {
			uintptr_t bar_phys_addr;

			pcie_ctrl_region_translate(ctrl_dev, bdf, found_mem,
						   found_mem64, bar_bus_addr, &bar_phys_addr);

			LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx "
				"assigned [%s 0x%lx-0x%lx -> 0x%lx-0x%lx]",
				PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
				bar, bar_size,
				found_mem ? (found_mem64 ? "mem64" : "mem") : "io",
				bar_bus_addr, bar_bus_addr + bar_size - 1,
				bar_phys_addr, bar_phys_addr + bar_size - 1);

			pcie_conf_write(bdf, reg, bar_bus_addr & 0xFFFFFFFF);
			if (found_mem64) {
				pcie_conf_write(bdf, reg + 1, bar_bus_addr >> 32);
			}
		} else {
			LOG_INF("[%02x:%02x.%x] BAR%d size 0x%lx: failed to allocate region",
				PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
				bar, bar_size);
		}

		if (found_mem64) {
			reg++;
		}
	}
}

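/*
 * First-pass configuration of a Type 1 header (PCI-to-PCI bridge): set up
 * the bridge BARs, program the primary/secondary bus numbers (the
 * subordinate number is left at 0xff until the child bus has been fully
 * scanned), and open the bridge's I/O and memory windows. Returns true if
 * @bdf is a bridge that was configured.
 */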
static bool pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
					      unsigned int bus_number)
{
	uint32_t class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);

	/* Handle only PCI-to-PCI bridges for now */
	if (PCIE_CONF_CLASSREV_CLASS(class) == 0x06 &&
	    PCIE_CONF_CLASSREV_SUBCLASS(class) == 0x04) {
		uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
		uintptr_t bar_base_addr;

		pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 2);

		/* Configure bus number registers */
		pcie_conf_write(bdf, PCIE_BUS_NUMBER,
				PCIE_BUS_NUMBER_VAL(PCIE_BDF_TO_BUS(bdf),
						    bus_number,
						    0xff, /* set max until we finish scanning */
						    PCIE_SECONDARY_LATENCY_TIMER(number)));

		/* I/O align on 4k boundary */
		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
						       KB(4), &bar_base_addr)) {
			uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
			uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);

			pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
					PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
							       PCIE_IO_LIMIT(io),
							       PCIE_SEC_STATUS(io)));

			pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
					PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
								     PCIE_IO_LIMIT_UPPER(io_upper)));

			pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_IO, true);
		}

		/* MEM align on 1MiB boundary */
		if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
						       MB(1), &bar_base_addr)) {
			uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);

			pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
					PCIE_MEM_BASE_LIMIT_VAL((bar_base_addr & 0xfff00000) >> 16,
								PCIE_MEM_LIMIT(mem)));

			pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MEM, true);
		}

		/* TODO: add support for prefetchable */

		pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);

		return true;
	}

	return false;
}

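/*
 * Second-pass bridge configuration, run once the child bus has been fully
 * scanned: set the subordinate bus number to the last bus assigned and
 * close the I/O and memory windows at the current allocation base.
 */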
static void pcie_generic_ctrl_post_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
						   unsigned int bus_number)
{
	uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
	uintptr_t bar_base_addr;

	/* Configure bus subordinate */
	pcie_conf_write(bdf, PCIE_BUS_NUMBER,
			PCIE_BUS_NUMBER_VAL(PCIE_BUS_PRIMARY_NUMBER(number),
					    PCIE_BUS_SECONDARY_NUMBER(number),
					    bus_number - 1,
					    PCIE_SECONDARY_LATENCY_TIMER(number)));

	/* I/O align on 4k boundary */
	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
					       KB(4), &bar_base_addr)) {
		uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
		uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);

		pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
				PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
						       ((bar_base_addr - 1) & 0x0000f000) >> 16,
						       PCIE_SEC_STATUS(io)));

		pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
				PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
							     ((bar_base_addr - 1) & 0xffff0000) >> 16));
	}

	/* MEM align on 1MiB boundary */
	if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
					       MB(1), &bar_base_addr)) {
		uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);

		pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
				PCIE_MEM_BASE_LIMIT_VAL(PCIE_MEM_BASE(mem),
							(bar_base_addr - 1) >> 16));
	}

	/* TODO: add support for prefetchable */
}

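/* Type 0 headers (regular endpoints) only need their six BARs set up */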
static void pcie_generic_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
	/* Setup Type0 BARs */
	pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 6);
}

static bool pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev,
						 pcie_bdf_t bdf, unsigned int bus_number,
						 bool *skip_next_func)
{
	bool multifunction_device = false;
	bool layout_type_1 = false;
	uint32_t data, class, id;
	bool is_bridge = false;

	*skip_next_func = false;

	id = pcie_conf_read(bdf, PCIE_CONF_ID);
	if (id == PCIE_ID_NONE) {
		return false;
	}

	class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
	data = pcie_conf_read(bdf, PCIE_CONF_TYPE);

	multifunction_device = PCIE_CONF_MULTIFUNCTION(data);
	layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data);

	LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x "
		"rev %x Type%x multifunction %s",
		PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
		id & 0xffff, id >> 16,
		PCIE_CONF_CLASSREV_CLASS(class),
		PCIE_CONF_CLASSREV_SUBCLASS(class),
		PCIE_CONF_CLASSREV_PROGIF(class),
		PCIE_CONF_CLASSREV_REV(class),
		layout_type_1 ? 1 : 0,
		multifunction_device ? "true" : "false");

	/* Do not enumerate sub-functions if not a multifunction device */
	if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) {
		*skip_next_func = true;
	}

	if (layout_type_1) {
		is_bridge = pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf, bus_number);
	} else {
		pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf);
	}

	return is_bridge;
}

/* Return the next BDF on the current bus, or PCIE_BDF_NONE when the bus is exhausted */
static inline unsigned int pcie_bdf_bus_next(unsigned int bdf, bool skip_next_func)
{
	if (skip_next_func) {
		if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK) {
			return PCIE_BDF_NONE;
		}

		return PCIE_BDF(PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf) + 1, 0);
	}

	if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK &&
	    PCIE_BDF_TO_FUNC(bdf) == PCIE_BDF_FUNC_MASK) {
		return PCIE_BDF_NONE;
	}

	return PCIE_BDF(PCIE_BDF_TO_BUS(bdf),
			(PCIE_BDF_TO_DEV(bdf) +
			 ((PCIE_BDF_TO_FUNC(bdf) + 1) / (PCIE_BDF_FUNC_MASK + 1))),
			((PCIE_BDF_TO_FUNC(bdf) + 1) & PCIE_BDF_FUNC_MASK));
}

struct pcie_bus_state {
	/* Current scanned bus BDF, always valid */
	unsigned int bus_bdf;
	/* Current bridge endpoint BDF, either valid or PCIE_BDF_NONE */
	unsigned int bridge_bdf;
	/* Next BDF to scan on the bus, or PCIE_BDF_NONE once all endpoints are scanned */
	unsigned int next_bdf;
};

#define MAX_TRAVERSE_STACK 256

/* Non-recursive, stack-based PCIe bus & bridge enumeration */
void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
{
	struct pcie_bus_state stack[MAX_TRAVERSE_STACK], *state;
	unsigned int bus_number = PCIE_BDF_TO_BUS(bdf_start) + 1;
	bool skip_next_func = false;
	bool is_bridge = false;

	int stack_top = 0;

	/* Start with the first endpoint of the immediate Root Controller bus */
	stack[stack_top].bus_bdf = PCIE_BDF(PCIE_BDF_TO_BUS(bdf_start), 0, 0);
	stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
	stack[stack_top].next_bdf = bdf_start;

	while (stack_top >= 0) {
		/* Top of stack contains the current PCIe bus to traverse */
		state = &stack[stack_top];

		/* Finish current bridge configuration before scanning other endpoints */
		if (state->bridge_bdf != PCIE_BDF_NONE) {
			pcie_generic_ctrl_post_enumerate_type1(ctrl_dev, state->bridge_bdf,
							       bus_number);

			state->bridge_bdf = PCIE_BDF_NONE;
		}

		/* We still have more endpoints to scan */
		if (state->next_bdf != PCIE_BDF_NONE) {
			while (state->next_bdf != PCIE_BDF_NONE) {
				is_bridge = pcie_generic_ctrl_enumerate_endpoint(ctrl_dev,
										 state->next_bdf,
										 bus_number,
										 &skip_next_func);
				if (is_bridge) {
					state->bridge_bdf = state->next_bdf;
					state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
									    skip_next_func);

					/* If we can't handle more bridges, don't go further */
					if (stack_top == (MAX_TRAVERSE_STACK - 1) ||
					    bus_number == PCIE_BDF_BUS_MASK) {
						break;
					}

					/* Push to stack to scan this bus */
					stack_top++;
					stack[stack_top].bus_bdf = PCIE_BDF(bus_number, 0, 0);
					stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
					stack[stack_top].next_bdf = PCIE_BDF(bus_number, 0, 0);

					/* Increase bus number */
					bus_number++;

					break;
				}

				state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
								    skip_next_func);
			}
		} else {
			/* We finished scanning this bus, go back and scan the next endpoints */
			stack_top--;
		}
	}
}

#ifdef CONFIG_PCIE_MSI
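/* Message address to program into the endpoint's MSI capability */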
uint32_t pcie_msi_map(unsigned int irq, msi_vector_t *vector, uint8_t n_vector)
{
	ARG_UNUSED(irq);
	ARG_UNUSED(n_vector);

	return vector->arch.address;
}

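/* Message data (MDR) value to program into the endpoint's MSI capability */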
uint16_t pcie_msi_mdr(unsigned int irq, msi_vector_t *vector)
{
	ARG_UNUSED(irq);

	return vector->arch.eventid;
}

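/* Allocate up to @n_vector MSI vectors through the chosen PCIe controller */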
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector)
{
	const struct device *dev;

	dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_pcie_controller));
	if (!dev) {
		LOG_ERR("Failed to get PCIe root complex");
		return 0;
	}

	return pcie_ctrl_msi_device_setup(dev, priority, vectors, n_vector);
}

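/* Connect and enable the dynamic IRQ backing an MSI vector */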
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags)
{
	if (irq_connect_dynamic(vector->arch.irq, vector->arch.priority, routine,
				parameter, flags) != vector->arch.irq) {
		return false;
	}

	irq_enable(vector->arch.irq);

	return true;
}
#endif /* CONFIG_PCIE_MSI */