1 /*
2 * Copyright 2020 Broadcom
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/drivers/dma.h>
8 #include <zephyr/drivers/pcie/endpoint/pcie_ep.h>
9
10 #define LOG_LEVEL CONFIG_PCIE_EP_LOG_LEVEL
11 #include <zephyr/logging/log.h>
12 #include <zephyr/irq.h>
13 LOG_MODULE_REGISTER(iproc_pcie);
14
15 #include "pcie_ep_iproc.h"
16
17 #define DT_DRV_COMPAT brcm_iproc_pcie_ep
18
/**
 * Read one 32-bit word from PCIe configuration space.
 *
 * Uses PAXB's indirect access pair: the offset is first programmed into
 * the Configuration Indirect Address register, then the value is latched
 * from the Configuration Indirect Data register.
 *
 * @param dev    PCIe EP device
 * @param offset Configuration-space register offset to read
 * @param data   Output: value read from configuration space
 *
 * @return 0 always (hardware access cannot fail here)
 */
static int iproc_pcie_conf_read(const struct device *dev, uint32_t offset,
				uint32_t *data)
{
	const struct iproc_pcie_ep_config *ep_cfg = dev->config;

	/* Select the target register through the indirect address window */
	pcie_write32(offset, &ep_cfg->base->paxb_config_ind_addr);

	/* Latch its value out of the indirect data window */
	*data = pcie_read32(&ep_cfg->base->paxb_config_ind_data);

	return 0;
}
32
/**
 * Write one 32-bit word to PCIe configuration space.
 *
 * Mirror of iproc_pcie_conf_read(): program the offset into the
 * Configuration Indirect Address register, then push the value through
 * the Configuration Indirect Data register.
 *
 * @param dev    PCIe EP device
 * @param offset Configuration-space register offset to write
 * @param data   Value to write
 */
static void iproc_pcie_conf_write(const struct device *dev, uint32_t offset,
				  uint32_t data)
{
	const struct iproc_pcie_ep_config *ep_cfg = dev->config;

	/* Select the target register through the indirect address window */
	pcie_write32(offset, &ep_cfg->base->paxb_config_ind_addr);

	/* Push the new value through the indirect data window */
	pcie_write32(data, &ep_cfg->base->paxb_config_ind_data);
}
44
/**
 * Map a Host (PCIe) address into a local outbound window.
 *
 * The controller exposes two outbound windows: one in the highmem region
 * and one in the lowmem region. A caller may request a specific region or
 * PCIE_OB_ANYMEM; a window is granted only if it is currently free.
 *
 * Because the hardware requires the PCIe address to be aligned to the
 * window size, the mapping may start below @p pcie_addr; the returned
 * size is therefore the usable span from @p pcie_addr, which can be
 * smaller than @p size.
 *
 * @param dev         PCIe EP device
 * @param pcie_addr   Host PCIe address to map
 * @param mapped_addr Output: local address corresponding to @p pcie_addr
 * @param size        Number of bytes the caller wants mapped
 * @param ob_mem_type Requested window region (high/low/any)
 *
 * @return Number of bytes actually mapped (<= @p size) on success,
 *         -EBUSY if no suitable window is free,
 *         -ENOTSUP if @p size exceeds the selected window's capacity.
 */
static int iproc_pcie_map_addr(const struct device *dev, uint64_t pcie_addr,
			       uint64_t *mapped_addr, uint32_t size,
			       enum pcie_ob_mem_type ob_mem_type)
{
	const struct iproc_pcie_ep_config *cfg = dev->config;
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	uint64_t pcie_ob_base, pcie_ob_size, pcie_addr_start, offset;
	uint32_t mapped_size;
	enum pcie_outbound_map idx;
	k_spinlock_key_t key;
	int ret;

	/* Window bookkeeping (in_use flags) is shared state: serialize */
	key = k_spin_lock(&ctx->ob_map_lock);

	/* We support 2 outbound windows,
	 * one in highmem region and another in lowmem region
	 */
	if ((ob_mem_type == PCIE_OB_HIGHMEM ||
	     ob_mem_type == PCIE_OB_ANYMEM) && !ctx->highmem_in_use) {
		idx = PCIE_MAP_HIGHMEM_IDX;
		pcie_ob_base = cfg->map_high_base;
		pcie_ob_size = cfg->map_high_size;
	} else if ((ob_mem_type == PCIE_OB_LOWMEM ||
		    ob_mem_type == PCIE_OB_ANYMEM) && !ctx->lowmem_in_use) {
		idx = PCIE_MAP_LOWMEM_IDX;
		pcie_ob_base = cfg->map_low_base;
		pcie_ob_size = cfg->map_low_size;
	} else {
		ret = -EBUSY;
		goto out;
	}

	/* check if the selected OB window supports size we want to map */
	if (size > pcie_ob_size) {
		ret = -ENOTSUP;
		goto out;
	}

	/* Host PCIe address should be aligned to outbound window size
	 * (window sizes are powers of two, so the mask below is valid)
	 */
	pcie_addr_start = pcie_addr & ~(pcie_ob_size - 1);

	/* Program OARR with PCIe outbound address (local side of window),
	 * setting the VALID bit to activate the translation
	 */
	pcie_write32(((pcie_ob_base & ~(pcie_ob_size - 1)) | PAXB_OARR_VALID),
		     &cfg->base->paxb_oarr[idx].lower);
	pcie_write32(pcie_ob_base >> 32, &cfg->base->paxb_oarr[idx].upper);

	/* Program OMAP with Host PCIe address (remote side of window) */
	pcie_write32((uint32_t)pcie_addr_start,
		     &cfg->base->paxb_omap[idx].lower);
	pcie_write32((uint32_t)(pcie_addr_start >> 32),
		     &cfg->base->paxb_omap[idx].upper);

	/* Mark usage of outbound window */
	if (idx == PCIE_MAP_HIGHMEM_IDX) {
		ctx->highmem_in_use = true;
	} else {
		ctx->lowmem_in_use = true;
	}

	/* offset holds extra size mapped due to alignment requirement */
	offset = pcie_addr - pcie_addr_start;
	*mapped_addr = pcie_ob_base + offset;
	mapped_size = pcie_ob_size - offset;
	/* Return the usable span from pcie_addr, capped at the request */
	ret = ((mapped_size >= size) ? size : mapped_size);
out:
	k_spin_unlock(&ctx->ob_map_lock, key);

	return ret;
}
114
/**
 * Release the outbound window that @p mapped_addr was mapped through.
 *
 * The window is identified by the address alone: mapped addresses above
 * 4 GiB came from the highmem window, everything else from lowmem.
 *
 * @param dev         PCIe EP device
 * @param mapped_addr Local address previously returned by map_addr
 */
static void iproc_pcie_unmap_addr(const struct device *dev,
				  uint64_t mapped_addr)
{
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	k_spinlock_key_t key = k_spin_lock(&ctx->ob_map_lock);

	/* A non-zero upper half means the highmem window was used */
	if ((mapped_addr >> 32) != 0) {
		ctx->highmem_in_use = false;
	} else {
		ctx->lowmem_in_use = false;
	}

	k_spin_unlock(&ctx->ob_map_lock, key);
}
131
/**
 * Raise an interrupt towards the PCIe Host.
 *
 * Dispatches on @p irq_type: MSI and MSI-X are generated through the
 * corresponding helpers; legacy (INTx) interrupts are not supported by
 * this controller.
 *
 * @param dev      PCIe EP device
 * @param irq_type Interrupt flavor to raise (MSI / MSI-X / legacy)
 * @param irq_num  Interrupt vector number
 *
 * @return 0 on success, -ENOTSUP for legacy IRQs, -EINVAL for an
 *         unknown type, or the error from the MSI/MSI-X helper.
 */
static int iproc_pcie_raise_irq(const struct device *dev,
				enum pci_ep_irq_type irq_type,
				uint32_t irq_num)
{
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	k_spinlock_key_t key;
	int ret;

	/* Serialize IRQ generation against concurrent callers */
	key = k_spin_lock(&ctx->raise_irq_lock);

	switch (irq_type) {
	case PCIE_EP_IRQ_MSI:
		ret = iproc_pcie_generate_msi(dev, irq_num);
		break;
	case PCIE_EP_IRQ_MSIX:
		ret = iproc_pcie_generate_msix(dev, irq_num);
		break;
	case PCIE_EP_IRQ_LEGACY:
		ret = -ENOTSUP;
		break;
	default:
		LOG_ERR("Unknown IRQ type\n");
		ret = -EINVAL;
		/* Fix: terminate the default case explicitly so a case
		 * appended later cannot silently fall through.
		 */
		break;
	}

	k_spin_unlock(&ctx->raise_irq_lock, key);
	return ret;
}
160
/**
 * Register a callback to be invoked when a PCIe reset event occurs.
 *
 * @param dev   PCIe EP device
 * @param reset Reset event to hook (PERST, inband PERST, FLR, ...)
 * @param cb    Callback to invoke from the reset ISR
 * @param arg   Opaque argument handed back to @p cb
 *
 * @return 0 on success, -EINVAL if @p reset is out of range
 */
static int iproc_pcie_register_reset_cb(const struct device *dev,
					enum pcie_reset reset,
					pcie_ep_reset_callback_t cb, void *arg)
{
	struct iproc_pcie_ep_ctx *ctx = dev->data;

	/* Reject reset identifiers outside the supported table */
	if (reset < PCIE_PERST || reset >= PCIE_RESET_MAX) {
		return -EINVAL;
	}

	LOG_DBG("Registering the callback for reset %d", reset);

	ctx->reset_cb[reset] = cb;
	ctx->reset_data[reset] = arg;

	return 0;
}
177
178 #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(dmas)
/**
 * Move data between local memory and a mapped Host address with the
 * PL330 DMA controller.
 *
 * The transfer is a single memory-to-memory block; direction only
 * selects which side is source/destination and which DMA channel
 * (tx or rx) is used.
 *
 * @param dev         PCIe EP device
 * @param mapped_addr Host address already mapped via map_addr
 * @param local_addr  Local buffer address
 * @param size        Transfer length in bytes
 * @param dir         DEVICE_TO_HOST or HOST_TO_DEVICE
 *
 * @return 0 on success, -ENODEV if the DMA controller is not ready,
 *         or the error from dma_config()/dma_start().
 */
static int iproc_pcie_pl330_dma_xfer(const struct device *dev,
				     uint64_t mapped_addr,
				     uintptr_t local_addr, uint32_t size,
				     const enum xfer_direction dir)
{
	const struct iproc_pcie_ep_config *cfg = dev->config;
	struct dma_config xfer_cfg = { 0 };
	struct dma_block_config blk_cfg = { 0 };
	uint32_t chan_id;
	int ret;

	if (!device_is_ready(cfg->pl330_dev)) {
		LOG_ERR("DMA controller is not ready\n");
		return -ENODEV;
	}

	/* One mem-to-mem block covering the whole transfer */
	xfer_cfg.channel_direction = MEMORY_TO_MEMORY;
	xfer_cfg.block_count = 1U;
	xfer_cfg.head_block = &blk_cfg;
	blk_cfg.block_size = size;

	/* Direction picks source/destination and the DMA channel */
	if (dir == DEVICE_TO_HOST) {
		blk_cfg.source_address = local_addr;
		blk_cfg.dest_address = mapped_addr;
		chan_id = cfg->pl330_tx_chan_id;
	} else {
		blk_cfg.source_address = mapped_addr;
		blk_cfg.dest_address = local_addr;
		chan_id = cfg->pl330_rx_chan_id;
	}

	ret = dma_config(cfg->pl330_dev, chan_id, &xfer_cfg);
	if (ret) {
		LOG_ERR("DMA config failed\n");
		return ret;
	}

	/* Kick off the transfer */
	ret = dma_start(cfg->pl330_dev, chan_id);
	if (ret) {
		LOG_ERR("DMA transfer failed\n");
	}

	return ret;
}
226 #endif
227
228 #if DT_INST_IRQ_HAS_NAME(0, perst)
iproc_pcie_perst(const struct device * dev)229 static void iproc_pcie_perst(const struct device *dev)
230 {
231 struct iproc_pcie_ep_ctx *ctx = dev->data;
232 void *reset_data;
233 uint32_t data;
234
235 data = sys_read32(CRMU_MCU_EXTRA_EVENT_STATUS);
236
237 if (data & PCIE0_PERST_INTR) {
238 LOG_DBG("PERST interrupt [0x%x]", data);
239 sys_write32(PCIE0_PERST_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR);
240
241 if (ctx->reset_cb[PCIE_PERST] != NULL) {
242 reset_data = ctx->reset_data[PCIE_PERST];
243 ctx->reset_cb[PCIE_PERST](reset_data);
244 }
245 }
246 }
247 #endif
248
249 #if DT_INST_IRQ_HAS_NAME(0, perst_inband)
iproc_pcie_hot_reset(const struct device * dev)250 static void iproc_pcie_hot_reset(const struct device *dev)
251 {
252 struct iproc_pcie_ep_ctx *ctx = dev->data;
253 void *reset_data;
254 uint32_t data;
255
256 data = sys_read32(CRMU_MCU_EXTRA_EVENT_STATUS);
257
258 if (data & PCIE0_PERST_INB_INTR) {
259 LOG_DBG("INBAND PERST interrupt [0x%x]", data);
260 sys_write32(PCIE0_PERST_INB_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR);
261
262 if (ctx->reset_cb[PCIE_PERST_INB] != NULL) {
263 reset_data = ctx->reset_data[PCIE_PERST_INB];
264 ctx->reset_cb[PCIE_PERST_INB](reset_data);
265 }
266 }
267 }
268 #endif
269
270 #if DT_INST_IRQ_HAS_NAME(0, flr)
/**
 * ISR for the Function Level Reset (FLR) interrupt.
 *
 * Reads the PAXB interrupt status: on an FLR event it acknowledges the
 * interrupt and invokes the user callback registered for PCIE_FLR; any
 * other PAXB interrupt (e.g. harmless early ECC errors) is simply
 * cleared. Finally the FLR-in-progress state is cleared in the device
 * control register.
 *
 * @param dev PCIe EP device
 */
static void iproc_pcie_flr(const struct device *dev)
{
	const struct iproc_pcie_ep_config *cfg = dev->config;
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	void *reset_data;
	uint32_t data;

	data = pcie_read32(&cfg->base->paxb_paxb_intr_status);

	if (data & PCIE0_FLR_INTR) {
		LOG_DBG("FLR interrupt[0x%x]", data);
		/* Ack the FLR interrupt before running the callback */
		pcie_write32(PCIE0_FLR_INTR, &cfg->base->paxb_paxb_intr_clear);

		if (ctx->reset_cb[PCIE_FLR] != NULL) {
			reset_data = ctx->reset_data[PCIE_FLR];
			ctx->reset_cb[PCIE_FLR](reset_data);
		}
	} else {
		/*
		 * Other interrupts like PAXB ECC Error interrupt
		 * could show up at the beginning which are harmless.
		 * So simply clearing those interrupts here
		 */
		LOG_DBG("PAXB interrupt[0x%x]", data);
		pcie_write32(data, &cfg->base->paxb_paxb_intr_clear);
	}

	/* Clear FLR in Progress bit
	 * NOTE(review): this ORs FLR_IN_PROGRESS into the device control
	 * register — presumably the bit is write-1-to-clear; verify
	 * against the PAXB register documentation.
	 */
	iproc_pcie_conf_read(dev, PCIE_DEV_CTRL_OFFSET, &data);
	data |= FLR_IN_PROGRESS;
	iproc_pcie_conf_write(dev, PCIE_DEV_CTRL_OFFSET, data);
}
303 #endif
304
/**
 * Configure and enable the reset-related interrupts (PERST, inband
 * PERST, FLR) that are declared for this instance in devicetree.
 *
 * Each section follows the same pattern: clear any stale pending event,
 * unmask/enable the interrupt source, then connect and enable the CPU
 * IRQ line. Sections compile only when the matching named IRQ exists.
 *
 * @param dev PCIe EP device
 */
static void iproc_pcie_reset_config(const struct device *dev)
{
	/* __unused: either variable may be unreferenced depending on
	 * which DT-gated sections below are compiled in.
	 */
	__unused uint32_t data;
	__unused const struct iproc_pcie_ep_config *cfg = dev->config;

#if DT_INST_IRQ_HAS_NAME(0, perst)
	/* Clear any possible prior pending PERST interrupt */
	sys_write32(PCIE0_PERST_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR);

	/* Enable PERST interrupt (falling-edge detect) */
	data = sys_read32(PCIE_PERSTB_INTR_CTL_STS);
	data |= PCIE0_PERST_FE_INTR;
	sys_write32(data, PCIE_PERSTB_INTR_CTL_STS);

	/* Unmask the PERST event in the CRMU event mask */
	data = sys_read32(CRMU_MCU_EXTRA_EVENT_MASK);
	data &= ~PCIE0_PERST_INTR;
	sys_write32(data, CRMU_MCU_EXTRA_EVENT_MASK);

	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, perst, irq),
		    DT_INST_IRQ_BY_NAME(0, perst, priority),
		    iproc_pcie_perst, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, perst, irq));
#endif

#if DT_INST_IRQ_HAS_NAME(0, perst_inband)
	/* Clear any possible prior pending Inband PERST interrupt */
	sys_write32(PCIE0_PERST_INB_INTR, CRMU_MCU_EXTRA_EVENT_CLEAR);

	/* Enable Inband PERST interrupt (falling-edge detect) */
	data = sys_read32(PCIE_PERSTB_INTR_CTL_STS);
	data |= PCIE0_PERST_INB_FE_INTR;
	sys_write32(data, PCIE_PERSTB_INTR_CTL_STS);

	/* Unmask the inband PERST event in the CRMU event mask */
	data = sys_read32(CRMU_MCU_EXTRA_EVENT_MASK);
	data &= ~PCIE0_PERST_INB_INTR;
	sys_write32(data, CRMU_MCU_EXTRA_EVENT_MASK);

	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, perst_inband, irq),
		    DT_INST_IRQ_BY_NAME(0, perst_inband, priority),
		    iproc_pcie_hot_reset, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, perst_inband, irq));
#endif

#if DT_INST_IRQ_HAS_NAME(0, flr)
	/* Clear any possible prior pending FLR */
	pcie_write32(PCIE0_FLR_INTR, &cfg->base->paxb_paxb_intr_clear);

	/* Set auto clear FLR and auto clear CRS post FLR */
	iproc_pcie_conf_read(dev, PCIE_TL_CTRL0_OFFSET, &data);
	data |= (AUTO_CLR_CRS_POST_FLR | AUTO_CLR_FLR_AFTER_DELAY);
	iproc_pcie_conf_write(dev, PCIE_TL_CTRL0_OFFSET, data);

	/* Enable Function Level Reset */
	data = pcie_read32(&cfg->base->paxb_paxb_intr_en);
	data |= PCIE0_FLR_INTR;
	pcie_write32(data, &cfg->base->paxb_paxb_intr_en);

	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, flr, irq),
		    DT_INST_IRQ_BY_NAME(0, flr, priority),
		    iproc_pcie_flr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, flr, irq));
#endif
}
368
369 #ifdef CONFIG_PCIE_EP_IPROC_V2
/**
 * Configure the interrupts needed for MSI-X Per-Vector Masking (PVM).
 *
 * Two independent, DT-gated mechanisms are set up:
 *  - snoop IRQ 1 to detect Host writes to the MSI-X capability
 *    (function mask bit), and
 *  - the PMON-lite write-address filter to detect Host writes to the
 *    MSI-X table (per-vector mask bits).
 *
 * @param dev PCIe EP device
 */
static void iproc_pcie_msix_pvm_config(const struct device *dev)
{
	/* __unused: any of these may be unreferenced depending on which
	 * DT-gated sections below are compiled in.
	 */
	__unused const struct iproc_pcie_ep_config *cfg = dev->config;
	__unused struct iproc_pcie_reg *base = cfg->base;
	__unused uint32_t data;

	/* configure snoop irq 1 for monitoring MSIX_CAP register */
#if DT_INST_IRQ_HAS_NAME(0, snoop_irq1)
	/* Point snoop address slot 1 at the MSI-X capability and enable it */
	data = pcie_read32(&cfg->base->paxb_snoop_addr_cfg[1]);
	data &= ~SNOOP_ADDR1_MASK;
	data |= (SNOOP_ADDR1 | SNOOP_ADDR1_EN);
	pcie_write32(data, &cfg->base->paxb_snoop_addr_cfg[1]);

	/* Unmask the snoop-valid interrupt */
	data = pcie_read32(&base->paxb_pcie_cfg_intr_mask);
	data &= ~SNOOP_VALID_INTR;
	pcie_write32(data, &base->paxb_pcie_cfg_intr_mask);

	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, snoop_irq1, irq),
		    DT_INST_IRQ_BY_NAME(0, snoop_irq1, priority),
		    iproc_pcie_func_mask_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, snoop_irq1, irq));

	LOG_DBG("snoop interrupt configured\n");
#endif

	/* configure pmon lite interrupt for monitoring MSIX table */
#if DT_INST_IRQ_HAS_NAME(0, pcie_pmon_lite)
	/* Enable AXI write-address filter 0 over the MSI-X table range */
	data = sys_read32(PMON_LITE_PCIE_AXI_FILTER_0_CONTROL);
	data |= AXI_FILTER_0_ENABLE;
	sys_write32(data, PMON_LITE_PCIE_AXI_FILTER_0_CONTROL);

	sys_write32(MSIX_TABLE_BASE, AXI_FILTER_0_ADDR_START_LOW);
	/* Start of PBA is end of MSI-X table in our case */
	sys_write32(PBA_TABLE_BASE, AXI_FILTER_0_ADDR_END_LOW);

	/* Interrupt on any write-address match within the filter range */
	sys_set_bit(PMON_LITE_PCIE_INTERRUPT_ENABLE, WR_ADDR_CHK_INTR_EN);

	/* Start with a clean (all-zero) pending-bit array */
	memset((void *)PBA_TABLE_BASE, 0, PBA_TABLE_SIZE);

	IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, irq),
		    DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, priority),
		    iproc_pcie_vector_mask_isr, DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQ_BY_NAME(0, pcie_pmon_lite, irq));

	LOG_DBG("pcie pmon lite interrupt configured\n");
#endif
}
417 #endif
418
iproc_pcie_mode_check(const struct iproc_pcie_ep_config * cfg)419 static int iproc_pcie_mode_check(const struct iproc_pcie_ep_config *cfg)
420 {
421 uint32_t data;
422
423 data = pcie_read32(&cfg->base->paxb_strap_status);
424 LOG_DBG("PAXB_STRAP_STATUS = 0x%08X\n", data);
425
426 if (data & PCIE_RC_MODE_MASK) {
427 return -ENOTSUP;
428 }
429
430 return 0;
431 }
432
iproc_pcie_ep_init(const struct device * dev)433 static int iproc_pcie_ep_init(const struct device *dev)
434 {
435 const struct iproc_pcie_ep_config *cfg = dev->config;
436 struct iproc_pcie_ep_ctx *ctx = dev->data;
437 int ret;
438 uint32_t data;
439
440 ret = iproc_pcie_mode_check(cfg);
441 if (ret) {
442 LOG_ERR("ERROR: Only PCIe EP mode is supported\n");
443 goto err_out;
444 }
445
446 iproc_pcie_conf_read(dev, PCIE_LINK_STATUS_CONTROL, &data);
447 LOG_INF("PCIe linkup speed 0x%x\n", ((data >>
448 PCIE_LINKSPEED_SHIFT) & PCIE_LINKSPEED_MASK));
449 LOG_INF("PCIe linkup width 0x%x\n", ((data >>
450 PCIE_LINKWIDTH_SHIFT) & PCIE_LINKWIDTH_MASK));
451
452 #ifdef PCIE_EP_IPROC_INIT_CFG
453 iproc_pcie_msi_config(dev);
454 iproc_pcie_msix_config(dev);
455 #endif
456
457 /* configure interrupts for MSI-X Per-Vector Masking feature */
458 #ifdef CONFIG_PCIE_EP_IPROC_V2
459 iproc_pcie_msix_pvm_config(dev);
460 #endif
461
462 iproc_pcie_reset_config(dev);
463
464 ctx->highmem_in_use = false;
465 ctx->lowmem_in_use = false;
466 LOG_INF("PCIe initialized successfully\n");
467
468 err_out:
469 return ret;
470 }
471
/* Mutable per-instance state (window usage, reset callbacks, locks) */
static struct iproc_pcie_ep_ctx iproc_pcie_ep_ctx_0;

/* Immutable per-instance configuration, fully resolved from devicetree:
 * register base/size, the two outbound-window regions, and (when the
 * instance has a `dmas` property) the PL330 controller and its tx/rx
 * channel IDs.
 */
static const struct iproc_pcie_ep_config iproc_pcie_ep_config_0 = {
	.id = 0,
	.base = (struct iproc_pcie_reg *)DT_INST_REG_ADDR(0),
	.reg_size = DT_INST_REG_SIZE(0),
	.map_low_base = DT_INST_REG_ADDR_BY_NAME(0, map_lowmem),
	.map_low_size = DT_INST_REG_SIZE_BY_NAME(0, map_lowmem),
	.map_high_base = DT_INST_REG_ADDR_BY_NAME(0, map_highmem),
	.map_high_size = DT_INST_REG_SIZE_BY_NAME(0, map_highmem),
#if DT_INST_NODE_HAS_PROP(0, dmas)
	.pl330_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(0, 0)),
	.pl330_tx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, txdma, channel),
	.pl330_rx_chan_id = DT_INST_DMAS_CELL_BY_NAME(0, rxdma, channel),
#endif
};

/* pcie_ep driver API vtable; dma_xfer is exposed only when a DMA
 * controller is described in devicetree.
 */
static DEVICE_API(pcie_ep, iproc_pcie_ep_api) = {
	.conf_read = iproc_pcie_conf_read,
	.conf_write = iproc_pcie_conf_write,
	.map_addr = iproc_pcie_map_addr,
	.unmap_addr = iproc_pcie_unmap_addr,
	.raise_irq = iproc_pcie_raise_irq,
	.register_reset_cb = iproc_pcie_register_reset_cb,
#if DT_INST_NODE_HAS_PROP(0, dmas)
	.dma_xfer = iproc_pcie_pl330_dma_xfer,
#endif
};

/* Register instance 0 with the device model at POST_KERNEL */
DEVICE_DT_INST_DEFINE(0, &iproc_pcie_ep_init, NULL,
		      &iproc_pcie_ep_ctx_0,
		      &iproc_pcie_ep_config_0,
		      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE,
		      &iproc_pcie_ep_api);
506