/*
 * Copyright (c) 2021 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pcie_ecam, LOG_LEVEL_ERR);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/pcie/pcie.h>
#include <zephyr/drivers/pcie/controller.h>
#ifdef CONFIG_GIC_V3_ITS
#include <zephyr/drivers/interrupt_controller/gicv3_its.h>
#endif

#define DT_DRV_COMPAT pci_host_ecam_generic

/*
 * PCIe Controllers Regions
 *
 * TOFIX:
 * - handle prefetchable regions
 */
enum pcie_region_type {
	PCIE_REGION_IO = 0,
	PCIE_REGION_MEM,
	PCIE_REGION_MEM64,
	PCIE_REGION_MAX,
};

struct pcie_ecam_data {
	uintptr_t cfg_phys_addr;
	mm_reg_t cfg_addr;
	size_t cfg_size;
	struct {
		uintptr_t phys_start;
		uintptr_t bus_start;
		size_t size;
		size_t allocation_offset;
	} regions[PCIE_REGION_MAX];
};

static int pcie_ecam_init(const struct device *dev)
{
	const struct pcie_ctrl_config *cfg = dev->config;
	struct pcie_ecam_data *data = dev->data;
	int i;

	/*
	 * Flags defined in the PCI Bus Binding to IEEE Std 1275-1994:
	 * Bit#	33222222 22221111 11111100 00000000
	 *	10987654 32109876 54321098 76543210
	 *
	 * phys.hi cell:  npt000ss bbbbbbbb dddddfff rrrrrrrr
	 * phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
	 * phys.lo cell:  llllllll llllllll llllllll llllllll
	 *
	 * where:
	 *
	 * n	is 0 if the address is relocatable, 1 otherwise
	 * p	is 1 if the addressable region is "prefetchable", 0 otherwise
	 * t	is 1 if the address is aliased (for non-relocatable I/O), below 1 MB (for Memory),
	 *	or below 64 KB (for relocatable I/O)
	 * ss	is the space code, denoting the address space
	 *	00 denotes Configuration Space
	 *	01 denotes I/O Space
	 *	10 denotes 32-bit-address Memory Space
	 *	11 denotes 64-bit-address Memory Space
	 * bbbbbbbb	is the 8-bit Bus Number
	 * ddddd	is the 5-bit Device Number
	 * fff		is the 3-bit Function Number
	 * rrrrrrrr	is the 8-bit Register Number
	 * hh...hh	is a 32-bit unsigned number
	 * ll...ll	is a 32-bit unsigned number
	 *	for I/O Space is the 32-bit offset from the start of the region
	 *	for 32-bit-address Memory Space is the 32-bit offset from the start of the region
	 *	for 64-bit-address Memory Space is the 64-bit offset from the start of the region
	 *
	 * Here we only handle the p, ss, hh and ll fields.
	 *
	 * TOFIX:
	 * - handle prefetchable bit
	 */
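	/*
	 * Worked example (illustration only): a range whose phys.hi cell is
	 * 0x02000000 has ss = 0b10, i.e. a non-prefetchable 32-bit Memory
	 * Space window, so the switch below files it under PCIE_REGION_MEM;
	 * 0x03000000 (ss = 0b11) would land in PCIE_REGION_MEM64 instead.
	 */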
	for (i = 0 ; i < cfg->ranges_count ; ++i) {
		switch ((cfg->ranges[i].flags >> 24) & 0x03) {
		case 0x01:
			data->regions[PCIE_REGION_IO].bus_start = cfg->ranges[i].pcie_bus_addr;
			data->regions[PCIE_REGION_IO].phys_start = cfg->ranges[i].host_map_addr;
			data->regions[PCIE_REGION_IO].size = cfg->ranges[i].map_length;
			/* Linux & U-Boot avoid allocating PCI resources from address 0 */
			if (data->regions[PCIE_REGION_IO].bus_start < 0x1000) {
				data->regions[PCIE_REGION_IO].allocation_offset = 0x1000;
			}
			break;
		case 0x02:
			data->regions[PCIE_REGION_MEM].bus_start = cfg->ranges[i].pcie_bus_addr;
			data->regions[PCIE_REGION_MEM].phys_start = cfg->ranges[i].host_map_addr;
			data->regions[PCIE_REGION_MEM].size = cfg->ranges[i].map_length;
			/* Linux & U-Boot avoid allocating PCI resources from address 0 */
			if (data->regions[PCIE_REGION_MEM].bus_start < 0x1000) {
				data->regions[PCIE_REGION_MEM].allocation_offset = 0x1000;
			}
			break;
		case 0x03:
			data->regions[PCIE_REGION_MEM64].bus_start = cfg->ranges[i].pcie_bus_addr;
			data->regions[PCIE_REGION_MEM64].phys_start = cfg->ranges[i].host_map_addr;
			data->regions[PCIE_REGION_MEM64].size = cfg->ranges[i].map_length;
			/* Linux & U-Boot avoid allocating PCI resources from address 0 */
			if (data->regions[PCIE_REGION_MEM64].bus_start < 0x1000) {
				data->regions[PCIE_REGION_MEM64].allocation_offset = 0x1000;
			}
			break;
		}
	}

	if (!data->regions[PCIE_REGION_IO].size &&
	    !data->regions[PCIE_REGION_MEM].size &&
	    !data->regions[PCIE_REGION_MEM64].size) {
		LOG_ERR("No regions defined");
		return -EINVAL;
	}

	/* Get Config address space physical address & size */
	data->cfg_phys_addr = cfg->cfg_addr;
	data->cfg_size = cfg->cfg_size;

	if (data->regions[PCIE_REGION_IO].size) {
		LOG_DBG("IO bus [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_IO].bus_start,
			(data->regions[PCIE_REGION_IO].bus_start +
			 data->regions[PCIE_REGION_IO].size - 1),
			data->regions[PCIE_REGION_IO].size);
		LOG_DBG("IO space [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_IO].phys_start,
			(data->regions[PCIE_REGION_IO].phys_start +
			 data->regions[PCIE_REGION_IO].size - 1),
			data->regions[PCIE_REGION_IO].size);
	}
	if (data->regions[PCIE_REGION_MEM].size) {
		LOG_DBG("MEM bus [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_MEM].bus_start,
			(data->regions[PCIE_REGION_MEM].bus_start +
			 data->regions[PCIE_REGION_MEM].size - 1),
			data->regions[PCIE_REGION_MEM].size);
		LOG_DBG("MEM space [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_MEM].phys_start,
			(data->regions[PCIE_REGION_MEM].phys_start +
			 data->regions[PCIE_REGION_MEM].size - 1),
			data->regions[PCIE_REGION_MEM].size);
	}
	if (data->regions[PCIE_REGION_MEM64].size) {
		LOG_DBG("MEM64 bus [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_MEM64].bus_start,
			(data->regions[PCIE_REGION_MEM64].bus_start +
			 data->regions[PCIE_REGION_MEM64].size - 1),
			data->regions[PCIE_REGION_MEM64].size);
		LOG_DBG("MEM64 space [0x%lx - 0x%lx, size 0x%lx]",
			data->regions[PCIE_REGION_MEM64].phys_start,
			(data->regions[PCIE_REGION_MEM64].phys_start +
			 data->regions[PCIE_REGION_MEM64].size - 1),
			data->regions[PCIE_REGION_MEM64].size);
	}

	/* Map config space to be used by the pcie_generic_ctrl_conf_read/write callbacks */
	device_map(&data->cfg_addr, data->cfg_phys_addr, data->cfg_size, K_MEM_CACHE_NONE);

	LOG_DBG("Config space [0x%lx - 0x%lx, size 0x%lx]",
		data->cfg_phys_addr, (data->cfg_phys_addr + data->cfg_size - 1), data->cfg_size);
	LOG_DBG("Config mapped [0x%lx - 0x%lx, size 0x%lx]",
		data->cfg_addr, (data->cfg_addr + data->cfg_size - 1), data->cfg_size);

	pcie_generic_ctrl_enumerate(dev, PCIE_BDF(0, 0, 0));

	return 0;
}

static uint32_t pcie_ecam_ctrl_conf_read(const struct device *dev, pcie_bdf_t bdf, unsigned int reg)
{
	struct pcie_ecam_data *data = dev->data;

	return pcie_generic_ctrl_conf_read(data->cfg_addr, bdf, reg);
}

static void pcie_ecam_ctrl_conf_write(const struct device *dev, pcie_bdf_t bdf, unsigned int reg,
				      uint32_t reg_data)
{
	struct pcie_ecam_data *data = dev->data;

	pcie_generic_ctrl_conf_write(data->cfg_addr, bdf, reg, reg_data);
}

static bool pcie_ecam_region_allocate_type(struct pcie_ecam_data *data, pcie_bdf_t bdf,
					   size_t bar_size, uintptr_t *bar_bus_addr,
					   enum pcie_region_type type)
{
	uintptr_t addr;

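	/*
	 * Align the next free bus address up to the BAR size: BAR sizes are
	 * powers of two and a BAR must be naturally aligned to its size.
	 * For example, with bus_start + allocation_offset = 0x1100 and
	 * bar_size = 0x1000, this yields addr = 0x2000.
	 */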
	addr = (((data->regions[type].bus_start + data->regions[type].allocation_offset) - 1) |
		((bar_size) - 1)) + 1;

	if (addr - data->regions[type].bus_start + bar_size > data->regions[type].size) {
		return false;
	}

	*bar_bus_addr = addr;
	data->regions[type].allocation_offset = addr - data->regions[type].bus_start + bar_size;

	return true;
}

static bool pcie_ecam_region_allocate(const struct device *dev, pcie_bdf_t bdf,
				      bool mem, bool mem64, size_t bar_size,
				      uintptr_t *bar_bus_addr)
{
	struct pcie_ecam_data *data = dev->data;
	enum pcie_region_type type;

	if (mem && !data->regions[PCIE_REGION_MEM64].size &&
	    !data->regions[PCIE_REGION_MEM].size) {
		LOG_DBG("bdf %x no mem region defined for allocation", bdf);
		return false;
	}

	if (!mem && !data->regions[PCIE_REGION_IO].size) {
		LOG_DBG("bdf %x no io region defined for allocation", bdf);
		return false;
	}

	/*
	 * Allocate from the mem64 region when a 64-bit BAR is requested and
	 * mem64 is available, or when mem64 is the only memory region defined.
	 *
	 * TOFIX:
	 * - handle allocation from/to mem/mem64 when a region is full
	 */
	if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
		    (data->regions[PCIE_REGION_MEM64].size &&
		     !data->regions[PCIE_REGION_MEM].size))) {
		type = PCIE_REGION_MEM64;
	} else if (mem) {
		type = PCIE_REGION_MEM;
	} else {
		type = PCIE_REGION_IO;
	}

	return pcie_ecam_region_allocate_type(data, bdf, bar_size, bar_bus_addr, type);
}

static bool pcie_ecam_region_get_allocate_base(const struct device *dev, pcie_bdf_t bdf,
					       bool mem, bool mem64, size_t align,
					       uintptr_t *bar_base_addr)
{
	struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;
	enum pcie_region_type type;

	if (mem && !data->regions[PCIE_REGION_MEM64].size &&
	    !data->regions[PCIE_REGION_MEM].size) {
		LOG_DBG("bdf %x no mem region defined for allocation", bdf);
		return false;
	}

	if (!mem && !data->regions[PCIE_REGION_IO].size) {
		LOG_DBG("bdf %x no io region defined for allocation", bdf);
		return false;
	}

	/*
	 * Allocate from the mem64 region when a 64-bit BAR is requested and
	 * mem64 is available, or when mem64 is the only memory region defined.
	 *
	 * TOFIX:
	 * - handle allocation from/to mem/mem64 when a region is full
	 */
	if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
		    (data->regions[PCIE_REGION_MEM64].size &&
		     !data->regions[PCIE_REGION_MEM].size))) {
		type = PCIE_REGION_MEM64;
	} else if (mem) {
		type = PCIE_REGION_MEM;
	} else {
		type = PCIE_REGION_IO;
	}

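	/*
	 * Report the next base aligned to 'align' without reserving it;
	 * allocation_offset is only advanced by pcie_ecam_region_allocate_type().
	 */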
	*bar_base_addr = (((data->regions[type].bus_start +
			    data->regions[type].allocation_offset) - 1) | ((align) - 1)) + 1;

	return true;
}

static bool pcie_ecam_region_translate(const struct device *dev, pcie_bdf_t bdf,
				       bool mem, bool mem64, uintptr_t bar_bus_addr,
				       uintptr_t *bar_addr)
{
	struct pcie_ecam_data *data = dev->data;
	enum pcie_region_type type;

	/* Means it hasn't been allocated */
	if (!bar_bus_addr) {
		return false;
	}

	if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
		    (data->regions[PCIE_REGION_MEM64].size &&
		     !data->regions[PCIE_REGION_MEM].size))) {
		type = PCIE_REGION_MEM64;
	} else if (mem) {
		type = PCIE_REGION_MEM;
	} else {
		type = PCIE_REGION_IO;
	}

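	/* Translate the PCI bus address into a CPU physical address via the region offset */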
	*bar_addr = data->regions[type].phys_start + (bar_bus_addr - data->regions[type].bus_start);

	return true;
}

#if CONFIG_PCIE_MSI
static uint8_t pcie_ecam_msi_device_setup(const struct device *dev, unsigned int priority,
					  msi_vector_t *vectors, uint8_t n_vector)
{
#ifdef CONFIG_GIC_V3_ITS
	const struct pcie_ctrl_config *cfg = (const struct pcie_ctrl_config *)dev->config;
	unsigned int device_id;
	pcie_bdf_t bdf;
	int ret, i;

	if (!n_vector) {
		return 0;
	}

	bdf = vectors[0].bdf;

	/* We do not support allocating vectors for multiple BDFs for now.
	 * This would need tracking vectors already allocated for a BDF and
	 * re-allocating a properly sized table in the ITS for each BDF, since
	 * we can't be sure more vectors for each BDF will be allocated later.
	 * Simply bail out if that's the case here.
	 */
	for (i = 1; i < n_vector; i++) {
		if (vectors[i].bdf != bdf) {
			LOG_ERR("Multiple BDFs in a single MSI vector allocation isn't supported");
			return 0;
		}
	}

	device_id = PCI_BDF_TO_DEVID(bdf);

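	/* Reserve an ITS device table entry for this DeviceID, sized for n_vector event IDs */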
	ret = its_setup_deviceid(cfg->msi_parent, device_id, n_vector);
	if (ret) {
		return 0;
	}

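	/* Allocate an LPI per vector and map this device's event ID onto it */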
	for (i = 0; i < n_vector; i++) {
		vectors[i].arch.irq = its_alloc_intid(cfg->msi_parent);
		vectors[i].arch.address = its_get_msi_addr(cfg->msi_parent);
		vectors[i].arch.eventid = i;
		vectors[i].arch.priority = priority;

		ret = its_map_intid(cfg->msi_parent, device_id,
				    vectors[i].arch.eventid, vectors[i].arch.irq);
		if (ret) {
			break;
		}
	}

	return i;
#else
	return 0;
#endif
}
#endif

static const struct pcie_ctrl_driver_api pcie_ecam_api = {
	.conf_read = pcie_ecam_ctrl_conf_read,
	.conf_write = pcie_ecam_ctrl_conf_write,
	.region_allocate = pcie_ecam_region_allocate,
	.region_get_allocate_base = pcie_ecam_region_get_allocate_base,
	.region_translate = pcie_ecam_region_translate,
#if CONFIG_PCIE_MSI
	.msi_device_setup = pcie_ecam_msi_device_setup,
#endif
};
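
/*
 * Usage sketch (illustrative only, not part of this driver): callers are
 * expected to go through the pcie_ctrl_* wrappers from
 * <zephyr/drivers/pcie/controller.h> rather than this api table directly,
 * e.g. reading a function's vendor/device ID word:
 *
 *   uint32_t id = pcie_ctrl_conf_read(dev, PCIE_BDF(0, 1, 0), PCIE_CONF_ID);
 */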

#if CONFIG_PCIE_MSI
#define DEVICE_DT_GET_MSI_PARENT(n)					\
	.msi_parent = DEVICE_DT_GET(DT_PHANDLE(DT_DRV_INST(n), msi_parent)),
#else
#define DEVICE_DT_GET_MSI_PARENT(n)
#endif

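/*
 * Instantiate one controller per enabled DT node matching
 * "pci-host-ecam-generic": per-instance data, a config built from the node's
 * "reg" (ECAM window) and "ranges" properties, and the device itself,
 * initialized at PRE_KERNEL_1.
 */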
#define PCIE_ECAM_INIT(n)						\
	static struct pcie_ecam_data pcie_ecam_data##n;			\
	static const struct pcie_ctrl_config pcie_ecam_config##n = {	\
		DEVICE_DT_GET_MSI_PARENT(n)				\
		.cfg_addr = DT_INST_REG_ADDR(n),			\
		.cfg_size = DT_INST_REG_SIZE(n),			\
		.ranges_count = DT_NUM_RANGES(DT_DRV_INST(n)),		\
		.ranges = {						\
			DT_FOREACH_RANGE(DT_DRV_INST(n), PCIE_RANGE_FORMAT) \
		},							\
	};								\
	DEVICE_DT_INST_DEFINE(n, &pcie_ecam_init, NULL,			\
			      &pcie_ecam_data##n,			\
			      &pcie_ecam_config##n,			\
			      PRE_KERNEL_1,				\
			      CONFIG_PCIE_INIT_PRIORITY,		\
			      &pcie_ecam_api);

DT_INST_FOREACH_STATUS_OKAY(PCIE_ECAM_INIT)