/*
 * Copyright 2020 Broadcom
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/pcie/endpoint/pcie_ep.h>
#include <zephyr/logging/log.h>

#include "pcie_ep_iproc.h"

LOG_MODULE_DECLARE(iproc_pcie, CONFIG_PCIE_EP_LOG_LEVEL);

/* Helper macro to read 64-bit data using two 32-bit reads */
#define sys_read64(addr)    (((uint64_t)(sys_read32((addr) + 4)) << 32) | \
			     sys_read32(addr))
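/*
 * Note: the two reads are not atomic, so a concurrent host update of
 * the target 64-bit location (e.g. an MSI-X table entry) can be
 * observed torn.
 */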

#ifdef PCIE_EP_IPROC_INIT_CFG
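/*
 * One-time MSI-X capability setup, expected to run during endpoint
 * init before the host enumerates the device.
 */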
void iproc_pcie_msix_config(const struct device *dev)
{
	/*
	 * Configure capability of generating 16 messages,
	 * MSI-X table offset 0x10000 on BAR2,
	 * MSI-X PBA offset 0x10800 on BAR2.
	 */
	pcie_ep_conf_write(dev, MSIX_CONTROL, (MSIX_TABLE_SIZE - 1));
	pcie_ep_conf_write(dev, MSIX_TBL_OFF_BIR, MSIX_TBL_B2_10000);
	pcie_ep_conf_write(dev, MSIX_PBA_OFF_BIR, MSIX_PBA_B2_10800);
}

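/*
 * Advertise MSI support: patch the multiple-message-capable count in
 * the ID_VAL4 register, which presumably backs the MSI capability
 * that the host reads from config space.
 */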
void iproc_pcie_msi_config(const struct device *dev)
{
	uint32_t data;

	/* Configure capability of generating 16 messages */
	pcie_ep_conf_read(dev, ID_VAL4_OFFSET, &data);
	data = (data & ~(MSI_COUNT_MASK)) | (MSI_COUNT_VAL << MSI_COUNT_SHIFT);
	pcie_ep_conf_write(dev, ID_VAL4_OFFSET, data);
}
#endif
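/*
 * Send an MSI to the host. On iProc V2, the address/data pair that
 * the host programmed into the MSI capability is read back and the
 * data word is written out through an outbound memory transfer; on
 * older controllers, a write to a dedicated PAXB register generates
 * the interrupt.
 */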
int iproc_pcie_generate_msi(const struct device *dev, const uint32_t msi_num)
{
	int ret = 0;
#ifdef CONFIG_PCIE_EP_IPROC_V2
	uint64_t addr;
	uint32_t data;

	pcie_ep_conf_read(dev, MSI_ADDR_H, &data);
	addr = ((uint64_t)data) << 32;
	pcie_ep_conf_read(dev, MSI_ADDR_L, &data);
	addr = addr | data;

	if (addr == 0) {
		/*
		 * This is mostly the case where the test is being run
		 * from the device before the host driver sets up MSI,
		 * so return zero instead of an error.
		 */
		LOG_WRN("MSI is not set up, skipping MSI");
		return 0;
	}

	pcie_ep_conf_read(dev, MSI_DATA, &data);
	data |= msi_num;

	ret = pcie_ep_xfer_data_memcpy(dev, addr,
				       (uintptr_t *)&data, sizeof(data),
				       PCIE_OB_LOWMEM, DEVICE_TO_HOST);

#else
	const struct iproc_pcie_ep_config *cfg = dev->config;

	pcie_write32(msi_num, &cfg->base->paxb_pcie_sys_msi_req);
#endif
	return ret;
}
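/*
 * Send one MSI-X vector: read the vector's address/data entry from
 * the MSI-X table and write the data word to the host through an
 * outbound memory transfer.
 */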
static int generate_msix(const struct device *dev, const uint32_t msix_num)
{
	int ret;
	uint64_t addr;
	uint32_t data;

	addr = sys_read64(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_ADDR_OFF);

	if (addr == 0) {
		/*
		 * This is mostly the case where the test is being run
		 * from the device before the host driver has set up the
		 * MSI-X table, so return zero instead of an error.
		 */
		LOG_WRN("MSI-X table is not set up, skipping MSI-X");
		ret = 0;
		goto out;
	}

	data = sys_read32(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_DATA_OFF);

	ret = pcie_ep_xfer_data_memcpy(dev, addr,
				       (uintptr_t *)&data, sizeof(data),
				       PCIE_OB_LOWMEM, DEVICE_TO_HOST);

	if (ret < 0) {
		goto out;
	}

	LOG_DBG("msix %d generated", msix_num);
out:
	return ret;
}

#ifdef CONFIG_PCIE_EP_IPROC_V2
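/* Check whether the host has set the MSI-X function mask bit */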
static bool is_pcie_function_mask(const struct device *dev)
{
	uint32_t data;

	pcie_ep_conf_read(dev, MSIX_CAP, &data);

	return ((data & MSIX_FUNC_MASK) ? true : false);
}
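/* Check whether the host has masked the given MSI-X vector */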
static bool is_msix_vector_mask(const int msix_num)
{
	uint32_t data;

	data = sys_read32(MSIX_VECTOR_OFF(msix_num) + MSIX_TBL_VECTOR_CTRL_OFF);

	return ((data & MSIX_VECTOR_MASK) ? true : false);
}

/* This function is called from interrupt context */
static int generate_pending_msix(const struct device *dev, const int msix_num)
{
	int is_msix_pending;
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	k_spinlock_key_t key;

	/* check if the function mask bit got set by the host */
	if (is_pcie_function_mask(dev)) {
		LOG_DBG("function mask set! %d", msix_num);
		return 0;
	}

	key = k_spin_lock(&ctx->pba_lock);

	is_msix_pending = sys_test_bit(PBA_OFFSET(msix_num),
				       PENDING_BIT(msix_num));

	/* check if the vector mask bit is cleared for the pending msix */
	if (is_msix_pending && !(is_msix_vector_mask(msix_num))) {
		LOG_DBG("msix %d unmasked", msix_num);
		/* generate the msix and clear its pending bit */
		generate_msix(dev, msix_num);
		sys_clear_bit(PBA_OFFSET(msix_num), PENDING_BIT(msix_num));
	}

	k_spin_unlock(&ctx->pba_lock, key);
	return 0;
}

/* This function is called from interrupt context */
static int generate_all_pending_msix(const struct device *dev)
{
	int i;

	for (i = 0; i < MSIX_TABLE_SIZE; i++) {
		generate_pending_msix(dev, i);
	}

	return 0;
}
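/*
 * ISR for the PAXB config-write snoop interrupt, used here to catch
 * host updates to the MSI-X function mask: once the function is
 * unmasked, flush all pending vectors.
 */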
void iproc_pcie_func_mask_isr(void *arg)
{
	const struct device *dev = arg;
	const struct iproc_pcie_ep_config *cfg = dev->config;
	uint32_t data;

	data = pcie_read32(&cfg->base->paxb_pcie_cfg_intr_status);

	LOG_DBG("%s: %x", __func__, data);

	if (data & SNOOP_VALID_INTR) {
		pcie_write32(SNOOP_VALID_INTR,
			     &cfg->base->paxb_pcie_cfg_intr_clear);
		if (!is_pcie_function_mask(dev)) {
			generate_all_pending_msix(dev);
		}
	}
}
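/*
 * ISR for the PMON write-address-check interrupt, used here to catch
 * host writes to the in-memory MSI-X table (e.g. a vector unmask)
 * and flush all pending vectors.
 */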
void iproc_pcie_vector_mask_isr(void *arg)
{
	const struct device *dev = arg;
	int msix_table_update = sys_test_bit(PMON_LITE_PCIE_INTERRUPT_STATUS,
					     WR_ADDR_CHK_INTR_EN);

	LOG_DBG("%s: %x", __func__,
		sys_read32(PMON_LITE_PCIE_INTERRUPT_STATUS));

	if (msix_table_update) {
		sys_write32(BIT(WR_ADDR_CHK_INTR_EN),
			    PMON_LITE_PCIE_INTERRUPT_CLEAR);
		generate_all_pending_msix(dev);
	}
}
#endif

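/*
 * Generate MSI-X @msix_num. If the function or the vector is masked,
 * set the vector's pending bit in the PBA and return -EBUSY; the
 * mask-change ISRs above will re-send it once the host unmasks.
 */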
int iproc_pcie_generate_msix(const struct device *dev, const uint32_t msix_num)
{
	if (msix_num >= MSIX_TABLE_SIZE) {
		LOG_WRN("Exceeded max supported MSI-X vectors (%d)",
			MSIX_TABLE_SIZE);
		return -ENOTSUP;
	}

#ifdef CONFIG_PCIE_EP_IPROC_V2
	struct iproc_pcie_ep_ctx *ctx = dev->data;
	k_spinlock_key_t key;

	/*
	 * Read the function mask and vector mask bits and update the
	 * pending bit while holding the spinlock, so that interrupt
	 * context cannot update the PBA during this section. This
	 * prevents races between reading the mask bits and updating
	 * the pending bit.
	 */

	key = k_spin_lock(&ctx->pba_lock);

	if (is_pcie_function_mask(dev) || is_msix_vector_mask(msix_num)) {
		LOG_DBG("msix %d masked", msix_num);
		/* set the pending bit and return */
		sys_set_bit(PBA_OFFSET(msix_num), PENDING_BIT(msix_num));
		k_spin_unlock(&ctx->pba_lock, key);
		return -EBUSY;
	}

	k_spin_unlock(&ctx->pba_lock, key);
#endif
	return generate_msix(dev, msix_num);
}