1 /*
2 * Copyright (c) 2019 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <kernel.h>
8 #include <drivers/pcie/msi.h>
9 #include <drivers/pcie/cap.h>
10
11 /* functions documented in include/drivers/pcie/msi.h */
12
13 #ifdef CONFIG_PCIE_MSI_MULTI_VECTOR
14
15 #include <sys/mem_manage.h>
16
/* Weak default for architectures without multi-vector MSI support.
 *
 * Returns 0 (no vectors allocated), which makes
 * pcie_msi_vectors_allocate() report failure. An architecture port
 * that supports multi-vector MSI overrides this symbol.
 */
__weak uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
					      msi_vector_t *vectors,
					      uint8_t n_vector)
{
	ARG_UNUSED(priority);
	ARG_UNUSED(vectors);
	ARG_UNUSED(n_vector);

	return 0;
}
27
28
/* Weak default for architectures without multi-vector MSI support.
 *
 * Returns false (connection failed), which makes
 * pcie_msi_vector_connect() report failure. An architecture port that
 * supports multi-vector MSI overrides this symbol.
 */
__weak bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
					 void (*routine)(const void *parameter),
					 const void *parameter,
					 uint32_t flags)
{
	ARG_UNUSED(vector);
	ARG_UNUSED(routine);
	ARG_UNUSED(parameter);
	ARG_UNUSED(flags);

	return false;
}
41
42 #ifdef CONFIG_PCIE_MSI_X
43
/* Return the number of entries in the endpoint's MSI-X table.
 *
 * The Table Size field of the MSI-X Message Control register is
 * encoded as N-1, hence the +1.
 */
static uint32_t get_msix_table_size(pcie_bdf_t bdf,
				    uint32_t base)
{
	uint32_t mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);
	uint32_t encoded = (mcr & PCIE_MSIX_MCR_TSIZE) >> PCIE_MSIX_MCR_TSIZE_SHIFT;

	return encoded + 1;
}
53
/* Map the device's MSI-X table into virtual memory and point every
 * vector in @vectors at its corresponding table entry.
 *
 * The Table Offset register packs both the BAR Indicator (BIR) and
 * the table's byte offset within that BAR. Returns false if the BAR
 * cannot be retrieved, true on success.
 */
static bool map_msix_table_entries(pcie_bdf_t bdf,
				   uint32_t base,
				   msi_vector_t *vectors,
				   uint8_t n_vector)
{
	struct pcie_mbar bar;
	uintptr_t table_va;
	uint32_t tr;
	uint32_t offset;
	uint8_t bir;
	int idx;

	tr = pcie_conf_read(bdf, base + PCIE_MSIX_TR);
	bir = tr & PCIE_MSIX_TR_BIR;
	offset = tr & PCIE_MSIX_TR_OFFSET;

	if (!pcie_get_mbar(bdf, bir, &bar)) {
		return false;
	}

	/* NOTE(review): z_phys_map() reports no failure here; mapping is
	 * assumed to succeed.
	 */
	z_phys_map((uint8_t **)&table_va,
		   bar.phys_addr + offset,
		   n_vector * PCIE_MSIR_TABLE_ENTRY_SIZE, K_MEM_PERM_RW);

	for (idx = 0; idx < n_vector; idx++) {
		vectors[idx].msix_vector = (struct msix_vector *)
			(table_va + (idx * PCIE_MSIR_TABLE_ENTRY_SIZE));
	}

	return true;
}
84
/* Mark every vector in the array as MSI-X (true) or plain MSI (false). */
static void set_msix(msi_vector_t *vectors,
		     uint8_t n_vector,
		     bool msix)
{
	for (int idx = 0; idx < n_vector; idx++) {
		vectors[idx].msix = msix;
	}
}
95
#else
/* MSI-X support compiled out: stub the helpers so the generic code
 * below always takes the plain-MSI path.
 */
#define get_msix_table_size(...) 0
#define map_msix_table_entries(...) true
#define set_msix(...)
#endif /* CONFIG_PCIE_MSI_X */
101
/* Return how many vectors the endpoint requests via its MSI
 * Multiple Message Capable (MMC) field.
 *
 * MMC encodes a power of two: the true count is 2^MMC.
 */
static uint32_t get_msi_mmc(pcie_bdf_t bdf,
			    uint32_t base)
{
	uint32_t mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
	uint32_t mmc = (mcr & PCIE_MSI_MCR_MMC) >> PCIE_MSI_MCR_MMC_SHIFT;

	return 1 << mmc;
}
112
/* Allocate up to @n_vector interrupt vectors for the endpoint @bdf.
 *
 * Prefers MSI-X when the device exposes it (and CONFIG_PCIE_MSI_X is
 * enabled), otherwise falls back to plain MSI. The request is clamped
 * to what the hardware actually supports (MSI-X table size, or MSI
 * MMC count). Returns the number of vectors actually allocated by the
 * architecture layer, 0 on failure.
 */
uint8_t pcie_msi_vectors_allocate(pcie_bdf_t bdf,
				  unsigned int priority,
				  msi_vector_t *vectors,
				  uint8_t n_vector)
{
	bool msi = true;
	uint32_t base;
	uint32_t req_vectors;

	base = pcie_get_cap(bdf, PCI_CAP_ID_MSI);

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		uint32_t base_msix;

		base_msix = pcie_get_cap(bdf, PCI_CAP_ID_MSIX);
		if (base_msix != 0U) {
			msi = false;
			base = base_msix;
		}
	}

	/* Fix: bail out if the device exposes neither MSI nor MSI-X,
	 * mirroring the check in pcie_msi_vector_connect(). Without
	 * this we would read config space at a bogus offset (0 + reg)
	 * and report a garbage vector count.
	 */
	if (base == 0U) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		set_msix(vectors, n_vector, !msi);

		if (!msi) {
			req_vectors = get_msix_table_size(bdf, base);
			if (!map_msix_table_entries(bdf, base,
						    vectors, n_vector)) {
				return 0;
			}
		}
	}

	if (msi) {
		req_vectors = get_msi_mmc(bdf, base);
	}

	/* Never hand the arch layer more vectors than the device supports */
	if (n_vector > req_vectors) {
		n_vector = req_vectors;
	}

	return arch_pcie_msi_vectors_allocate(priority, vectors, n_vector);
}
156
/* Connect an interrupt handler to a previously allocated vector.
 *
 * Records the owning BDF in the vector, then delegates to the
 * architecture layer. Returns false if the device exposes neither MSI
 * nor MSI-X, or if the arch layer refuses the connection.
 */
bool pcie_msi_vector_connect(pcie_bdf_t bdf,
			     msi_vector_t *vector,
			     void (*routine)(const void *parameter),
			     const void *parameter,
			     uint32_t flags)
{
	uint32_t base = pcie_get_cap(bdf, PCI_CAP_ID_MSI);

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		uint32_t msix_base = pcie_get_cap(bdf, PCI_CAP_ID_MSIX);

		if (msix_base != 0U) {
			/* MSI-X capability takes precedence over MSI */
			base = msix_base;
		}
	}

	if (base == 0U) {
		/* Neither capability present: cannot connect anything */
		return false;
	}

	vector->bdf = bdf;

	return arch_pcie_msi_vector_connect(vector, routine, parameter, flags);
}
184
185 #endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */
186
187 #ifdef CONFIG_PCIE_MSI_X
188
/* Program each mapped MSI-X table entry with its address/data pair,
 * then set the global MSI-X Enable bit in the Message Control register.
 *
 * The table entries must be written before enabling, which is why the
 * MCR update comes last.
 */
static void enable_msix(pcie_bdf_t bdf,
			msi_vector_t *vectors,
			uint8_t n_vector,
			uint32_t base,
			unsigned int irq)
{
	int idx;
	uint32_t ctrl;

	for (idx = 0; idx < n_vector; idx++) {
		struct msix_vector *entry = vectors[idx].msix_vector;

		entry->msg_addr = pcie_msi_map(irq, &vectors[idx]);
		entry->msg_up_addr = 0;
		entry->msg_data = pcie_msi_mdr(irq, &vectors[idx]);
		entry->vector_ctrl = 0;
	}

	ctrl = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);
	ctrl |= PCIE_MSIX_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSIX_MCR, ctrl);
}
212
#else
/* MSI-X support compiled out: enabling MSI-X is a no-op. */
#define enable_msix(...)
#endif /* CONFIG_PCIE_MSI_X */
216
/* Clear the MSI Enable bit in the device's MSI Message Control
 * register, leaving all other bits untouched.
 */
static void disable_msi(pcie_bdf_t bdf,
			uint32_t base)
{
	uint32_t ctrl = pcie_conf_read(bdf, base + PCIE_MSI_MCR);

	ctrl &= ~PCIE_MSI_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSI_MCR, ctrl);
}
226
/* Program the device's MSI capability registers (address, data, vector
 * count) and set the MSI Enable bit.
 *
 * The address/data layout depends on the 64-bit Address Capable bit in
 * the Message Control register. The Multiple Message Enable (MME)
 * field is written as log2(n_vector).
 */
static void enable_msi(pcie_bdf_t bdf,
		       msi_vector_t *vectors,
		       uint8_t n_vector,
		       uint32_t base,
		       unsigned int irq)
{
	uint32_t mcr;
	uint32_t map;
	uint32_t mdr;
	uint32_t mme;

	map = pcie_msi_map(irq, vectors);
	pcie_conf_write(bdf, base + PCIE_MSI_MAP0, map);

	mdr = pcie_msi_mdr(irq, vectors);
	mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
	if (mcr & PCIE_MSI_MCR_64) {
		/* 64-bit capable device: upper address dword is 0,
		 * data register moves up by 4 bytes.
		 */
		pcie_conf_write(bdf, base + PCIE_MSI_MAP1_64, 0U);
		pcie_conf_write(bdf, base + PCIE_MSI_MDR_64, mdr);
	} else {
		pcie_conf_write(bdf, base + PCIE_MSI_MDR_32, mdr);
	}

	/* Generating MME field (1 counts as a power of 2) */
	for (mme = 0; n_vector > 1; mme++) {
		n_vector >>= 1;
	}

	/* Fix: clear any stale MME bits (e.g. left set by firmware)
	 * before ORing in the new value; MME is a 3-bit RW field.
	 */
	mcr &= ~(0x7U << PCIE_MSI_MCR_MME_SHIFT);
	mcr |= mme << PCIE_MSI_MCR_MME_SHIFT;

	mcr |= PCIE_MSI_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr);
}
260
/* Enable message-signaled interrupts for endpoint @bdf.
 *
 * Prefers MSI-X when the device exposes it (disabling plain MSI first
 * if both capabilities are present), otherwise uses MSI. Finally turns
 * on bus mastering so the device can issue the message writes.
 * Returns false if the device supports neither mechanism.
 */
bool pcie_msi_enable(pcie_bdf_t bdf,
		     msi_vector_t *vectors,
		     uint8_t n_vector,
		     unsigned int irq)
{
	bool use_msix = false;
	uint32_t base = pcie_get_cap(bdf, PCI_CAP_ID_MSI);

	if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		uint32_t msix_base = pcie_get_cap(bdf, PCI_CAP_ID_MSIX);

		if (msix_base != 0U) {
			/* Both capabilities present: make sure plain
			 * MSI is off before switching to MSI-X.
			 */
			if (base != 0U) {
				disable_msi(bdf, base);
			}
			use_msix = true;
			base = msix_base;
		}
	}

	if (base == 0U) {
		return false;
	}

	if (use_msix && IS_ENABLED(CONFIG_PCIE_MSI_X)) {
		enable_msix(bdf, vectors, n_vector, base, irq);
	} else {
		enable_msi(bdf, vectors, n_vector, base, irq);
	}

	pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);

	return true;
}
298