/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/pcie/msi.h>
#include <zephyr/drivers/pcie/cap.h>

/* functions documented in include/zephyr/drivers/pcie/msi.h */

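/*
 * Find the configuration-space base of the capability to use: MSI-X is
 * preferred over MSI when CONFIG_PCIE_MSI_X is enabled and the endpoint
 * exposes both. *msi is set to false when the MSI-X capability is
 * selected. Returns 0 if the endpoint exposes neither capability.
 */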
static uint32_t pcie_msi_base(pcie_bdf_t bdf, bool *msi)
{
	uint32_t base;

	if (msi != NULL) {
		*msi = true;
	}

	base = pcie_get_cap(bdf, PCI_CAP_ID_MSI);

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		uint32_t base_msix;

		base_msix = pcie_get_cap(bdf, PCI_CAP_ID_MSIX);
		if (base_msix != 0U) {
			base = base_msix;

			if (msi != NULL) {
				*msi = false;
			}
		}
	}

	return base;
}

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

#include <zephyr/kernel/mm.h>

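/*
 * Weak default stubs for the architecture hooks. An architecture with
 * multi-vector MSI support (x86, for instance) provides real
 * implementations; these defaults allocate no vectors and refuse to
 * connect, so drivers fail gracefully where support is missing.
 */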
__weak uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
					      msi_vector_t *vectors,
					      uint8_t n_vector)
{
	ARG_UNUSED(priority);
	ARG_UNUSED(vectors);
	ARG_UNUSED(n_vector);

	return 0;
}

__weak bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
					 void (*routine)(const void *parameter),
					 const void *parameter,
					 uint32_t flags)
{
	ARG_UNUSED(vector);
	ARG_UNUSED(routine);
	ARG_UNUSED(parameter);
	ARG_UNUSED(flags);

	return false;
}

#ifdef CONFIG_PCIE_MSI_X

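/*
 * The MSI-X table size is encoded in the message control register as
 * N - 1, hence the + 1 on the decoded field.
 */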
static uint32_t get_msix_table_size(pcie_bdf_t bdf,
				    uint32_t base)
{
	uint32_t mcr;

	mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);

	return ((mcr & PCIE_MSIX_MCR_TSIZE) >> PCIE_MSIX_MCR_TSIZE_SHIFT) + 1;
}
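/*
 * The MSI-X table lives in device memory: the capability gives the BAR
 * indicator (BIR) and the offset within that BAR. Map the table into
 * virtual memory and point each vector at its table entry.
 */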
static bool map_msix_table_entries(pcie_bdf_t bdf,
				   uint32_t base,
				   msi_vector_t *vectors,
				   uint8_t n_vector)
{
	uint32_t table_offset;
	uint8_t table_bir;
	struct pcie_bar bar;
	uintptr_t mapped_table;
	int i;

	table_offset = pcie_conf_read(bdf, base + PCIE_MSIX_TR);
	table_bir = table_offset & PCIE_MSIX_TR_BIR;
	table_offset &= PCIE_MSIX_TR_OFFSET;

	if (!pcie_get_mbar(bdf, table_bir, &bar)) {
		return false;
	}

	z_phys_map((uint8_t **)&mapped_table,
		   bar.phys_addr + table_offset,
		   n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW);

	for (i = 0; i < n_vector; i++) {
		vectors[i].msix_vector = (struct msix_vector *)
			(mapped_table + (i * PCIE_MSIR_TABLE_ENTRY_SIZE));
	}

	return true;
}
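/* Tag each vector as MSI-X (or not) so later steps pick the right path */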
static void set_msix(msi_vector_t *vectors,
		     uint8_t n_vector,
		     bool msix)
{
	int i;

	for (i = 0; i < n_vector; i++) {
		vectors[i].msix = msix;
	}
}

#else
#define get_msix_table_size(...) 0
#define map_msix_table_entries(...) true
#define set_msix(...)
#endif /* CONFIG_PCIE_MSI_X */

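/*
 * Decode the Multiple Message Capable (MMC) field of the message
 * control register into the number of vectors the endpoint supports.
 */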
static uint32_t get_msi_mmc(pcie_bdf_t bdf,
			    uint32_t base)
{
	uint32_t mcr;

	mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);

	/* MMC is encoded as a power of two: the actual count is 2^MMC */
	return 1 << ((mcr & PCIE_MSI_MCR_MMC) >> PCIE_MSI_MCR_MMC_SHIFT);
}
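/*
 * Allocate up to n_vector vectors for the endpoint at bdf, clamped to
 * what the hardware advertises (MSI-X table size or MSI MMC count).
 * The arch layer performs the actual allocation and returns how many
 * vectors it could obtain. No capability check is made here: callers
 * can use pcie_is_msi() beforehand.
 *
 * A minimal usage sketch from a driver (bdf and the priority value are
 * hypothetical):
 *
 *	msi_vector_t vectors[2];
 *	uint8_t n;
 *
 *	n = pcie_msi_vectors_allocate(bdf, 2, vectors, 2);
 *	if (n == 0) {
 *		return -EIO;
 *	}
 */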
uint8_t pcie_msi_vectors_allocate(pcie_bdf_t bdf,
				  unsigned int priority,
				  msi_vector_t *vectors,
				  uint8_t n_vector)
{
	uint32_t req_vectors;
	uint32_t base;
	bool msi;

	base = pcie_msi_base(bdf, &msi);

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		set_msix(vectors, n_vector, !msi);

		if (!msi) {
			req_vectors = get_msix_table_size(bdf, base);
			if (!map_msix_table_entries(bdf, base,
						    vectors, n_vector)) {
				return 0;
			}
		}
	}

	if (msi) {
		req_vectors = get_msi_mmc(bdf, base);
	}

	if (n_vector > req_vectors) {
		n_vector = req_vectors;
	}

	/* req_vectors is reused here as a plain loop index */
	for (req_vectors = 0; req_vectors < n_vector; req_vectors++) {
		vectors[req_vectors].bdf = bdf;
	}

	return arch_pcie_msi_vectors_allocate(priority, vectors, n_vector);
}
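/*
 * Connect an allocated vector to an interrupt service routine. Returns
 * false when the endpoint exposes no MSI/MSI-X capability; the actual
 * connection is delegated to the arch layer.
 *
 * A minimal sketch (my_isr and my_dev are hypothetical):
 *
 *	static void my_isr(const void *arg) { ... }
 *
 *	if (!pcie_msi_vector_connect(bdf, &vectors[0], my_isr, my_dev, 0)) {
 *		return -EIO;
 *	}
 */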
bool pcie_msi_vector_connect(pcie_bdf_t bdf,
			     msi_vector_t *vector,
			     void (*routine)(const void *parameter),
			     const void *parameter,
			     uint32_t flags)
{
	uint32_t base;

	base = pcie_msi_base(bdf, NULL);
	if (base == 0U) {
		return false;
	}

	return arch_pcie_msi_vector_connect(vector, routine, parameter, flags);
}

#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

#ifdef CONFIG_PCIE_MSI_X

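/*
 * Fill in each vector's entry in the memory-mapped MSI-X table (message
 * address, data, and an unmasked vector control word), then set the
 * enable bit in the message control register.
 */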
static void enable_msix(pcie_bdf_t bdf,
			msi_vector_t *vectors,
			uint8_t n_vector,
			uint32_t base,
			unsigned int irq)
{
	uint32_t mcr;
	int i;

	for (i = 0; i < n_vector; i++) {
		uint32_t map = pcie_msi_map(irq, &vectors[i], 1);
		uint32_t mdr = pcie_msi_mdr(irq, &vectors[i]);

		sys_write32(map, (mm_reg_t) &vectors[i].msix_vector->msg_addr);
		sys_write32(0, (mm_reg_t) &vectors[i].msix_vector->msg_up_addr);
		sys_write32(mdr, (mm_reg_t) &vectors[i].msix_vector->msg_data);
		sys_write32(0, (mm_reg_t) &vectors[i].msix_vector->vector_ctrl);
	}

	mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);
	mcr |= PCIE_MSIX_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSIX_MCR, mcr);
}

#else
#define enable_msix(...)
#endif /* CONFIG_PCIE_MSI_X */

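/*
 * Clear the MSI enable bit. When MSI-X is used, plain MSI must not be
 * left enabled at the same time.
 */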
static void disable_msi(pcie_bdf_t bdf,
			uint32_t base)
{
	uint32_t mcr;

	mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
	mcr &= ~PCIE_MSI_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr);
}
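/*
 * Program the MSI address and data registers (the data register offset
 * depends on whether the capability is the 64-bit variant), set the
 * Multiple Message Enable (MME) field to log2 of the vector count, and
 * set the enable bit.
 */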
static void enable_msi(pcie_bdf_t bdf,
		       msi_vector_t *vectors,
		       uint8_t n_vector,
		       uint32_t base,
		       unsigned int irq)
{
	uint32_t mcr;
	uint32_t map;
	uint32_t mdr;
	uint32_t mme;

	map = pcie_msi_map(irq, vectors, n_vector);
	pcie_conf_write(bdf, base + PCIE_MSI_MAP0, map);

	mdr = pcie_msi_mdr(irq, vectors);
	mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
	if ((mcr & PCIE_MSI_MCR_64) != 0U) {
		pcie_conf_write(bdf, base + PCIE_MSI_MAP1_64, 0U);
		pcie_conf_write(bdf, base + PCIE_MSI_MDR_64, mdr);
	} else {
		pcie_conf_write(bdf, base + PCIE_MSI_MDR_32, mdr);
	}

	/* MME is the log2 of the vector count (so 1 vector encodes as 0) */
	for (mme = 0; n_vector > 1; mme++) {
		n_vector >>= 1;
	}

	mcr |= mme << PCIE_MSI_MCR_MME_SHIFT;

	mcr |= PCIE_MSI_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr);
}
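/*
 * Enable message-signaled interrupts for the endpoint, preferring MSI-X
 * when available, then enable bus mastering so the endpoint can post
 * the messages upstream.
 *
 * A minimal sketch following vector allocation and connection (irq is
 * hypothetical and would typically come from the allocated vectors):
 *
 *	if (!pcie_msi_enable(bdf, vectors, n, irq)) {
 *		return -EIO;
 *	}
 */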
bool pcie_msi_enable(pcie_bdf_t bdf,
		     msi_vector_t *vectors,
		     uint8_t n_vector,
		     unsigned int irq)
{
	uint32_t base;
	bool msi;

	base = pcie_msi_base(bdf, &msi);
	if (base == 0U) {
		return false;
	}

	if (!msi && IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		disable_msi(bdf, base);
		enable_msix(bdf, vectors, n_vector, base, irq);
	} else {
		enable_msi(bdf, vectors, n_vector, base, irq);
	}

	pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);

	return true;
}
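/* True if the endpoint exposes an MSI (or MSI-X) capability at all */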
bool pcie_is_msi(pcie_bdf_t bdf)
{
	return (pcie_msi_base(bdf, NULL) != 0);
}