/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT qemu_ivshmem

#define LOG_LEVEL CONFIG_IVSHMEM_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ivshmem);

#include <errno.h>

#include <zephyr/kernel.h>

#include <zephyr/device.h>
#include <zephyr/drivers/pcie/cap.h>
#include <zephyr/init.h>

#include <zephyr/drivers/virtualization/ivshmem.h>
#include "virt_ivshmem.h"

#ifdef CONFIG_IVSHMEM_DOORBELL

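/*
 * ISR shared by all doorbell interrupts (one MSI-X vector each, or a
 * single INTx line). It raises the k_poll signal registered for the
 * vector, passing the vector number as the signal result.
 */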
static void ivshmem_doorbell(const void *arg)
{
	const struct ivshmem_param *param = arg;

	LOG_DBG("Interrupt received on vector %u", param->vector);

	if (param->signal != NULL) {
		k_poll_signal_raise(param->signal, param->vector);
	}
}

static bool ivshmem_configure_msi_x_interrupts(const struct device *dev)
{
#if defined(CONFIG_PCIE_MSI_X) && defined(CONFIG_PCIE_MSI_MULTI_VECTOR)
	struct ivshmem *data = dev->data;
	bool ret = false;
	uint8_t n_vectors;
	uint32_t key;
	int i;

	key = irq_lock();

	n_vectors = pcie_msi_vectors_allocate(data->pcie->bdf,
					      CONFIG_IVSHMEM_INT_PRIORITY,
					      data->vectors,
					      CONFIG_IVSHMEM_MSI_X_VECTORS);
	if (n_vectors == 0) {
		LOG_ERR("Could not allocate %u MSI-X vectors",
			CONFIG_IVSHMEM_MSI_X_VECTORS);
		goto out;
	}

	LOG_DBG("Allocated %u vectors", n_vectors);

	for (i = 0; i < n_vectors; i++) {
		data->params[i].dev = dev;
		data->params[i].vector = i;

		if (!pcie_msi_vector_connect(data->pcie->bdf,
					     &data->vectors[i],
					     ivshmem_doorbell,
					     &data->params[i], 0)) {
			LOG_ERR("Failed to connect MSI-X vector %u", i);
			goto out;
		}
	}

	LOG_INF("%u MSI-X Vectors connected", n_vectors);

	if (!pcie_msi_enable(data->pcie->bdf, data->vectors, n_vectors, 0)) {
		LOG_ERR("Could not enable MSI-X");
		goto out;
	}

	data->n_vectors = n_vectors;
	ret = true;

	LOG_DBG("MSI-X configured");
out:
	irq_unlock(key);

	return ret;
#else
	return false;
#endif
}

#ifdef CONFIG_IVSHMEM_V2
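/*
 * ivshmem-v2 devices may signal peers through a legacy INTx line instead
 * of MSI-X. The pin reported in PCI config space (1-4) selects one of the
 * IRQ descriptors taken from the devicetree "interrupts" property.
 */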
static bool ivshmem_configure_int_x_interrupts(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	const struct ivshmem_cfg *cfg = dev->config;
	uint32_t cfg_int = pcie_conf_read(data->pcie->bdf, PCIE_CONF_INTR);
	uint32_t cfg_intx_pin = PCIE_CONF_INTR_PIN(cfg_int);

	if (!IN_RANGE(cfg_intx_pin, PCIE_INTX_PIN_MIN, PCIE_INTX_PIN_MAX)) {
		LOG_ERR("Invalid INTx pin %u", cfg_intx_pin);
		return false;
	}

	/* Ensure INTx is enabled */
	pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_INTX_DISABLE, false);

	const struct intx_info *intx = &cfg->intx_info[cfg_intx_pin - 1];

	data->params[0].dev = dev;
	data->params[0].vector = 0;

	LOG_INF("Enabling INTx IRQ %u (pin %u)", intx->irq, cfg_intx_pin);
	if (intx->irq == INTX_IRQ_UNUSED ||
	    !pcie_connect_dynamic_irq(
		data->pcie->bdf, intx->irq, intx->priority,
		ivshmem_doorbell, &data->params[0], intx->flags)) {
		LOG_ERR("Failed to connect INTx ISR %u", cfg_intx_pin);
		return false;
	}

	data->n_vectors = 1;

	pcie_irq_enable(data->pcie->bdf, intx->irq);

	return true;
}
#endif /* CONFIG_IVSHMEM_V2 */

static void register_signal(const struct device *dev,
			    struct k_poll_signal *signal,
			    uint16_t vector)
{
	struct ivshmem *data = dev->data;

	data->params[vector].signal = signal;
}

#else

#define ivshmem_configure_msi_x_interrupts(...) true
#define ivshmem_configure_int_x_interrupts(...) true
#define register_signal(...)

#endif /* CONFIG_IVSHMEM_DOORBELL */

static const struct ivshmem_reg no_reg;

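/*
 * PCI configuration space is accessed one 32-bit register at a time;
 * `reg` is a register (dword) index. A 64-bit field is therefore read
 * as two consecutive dwords, low word first.
 */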
__maybe_unused static uint64_t pcie_conf_read_u64(pcie_bdf_t bdf, unsigned int reg)
{
	uint64_t lo = pcie_conf_read(bdf, reg);
	uint64_t hi = pcie_conf_read(bdf, reg + 1);

	return hi << 32 | lo;
}

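/*
 * An ivshmem PCI function exposes up to three BARs: one for the MMIO
 * registers, one for the MSI-X table and one for the shared memory
 * region. With ivshmem-v2 the shared memory may not be BAR-mapped at
 * all; its physical address is then published through the vendor
 * specific capability instead.
 */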
static bool ivshmem_configure(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	struct pcie_bar mbar_regs, mbar_msi_x, mbar_shmem;

	if (!pcie_get_mbar(data->pcie->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) {
		if (IS_ENABLED(CONFIG_IVSHMEM_DOORBELL)
		    IF_ENABLED(CONFIG_IVSHMEM_V2, (|| data->ivshmem_v2))) {
			LOG_ERR("ivshmem regs bar not found");
			return false;
		}
		LOG_INF("ivshmem regs bar not found");
		device_map(DEVICE_MMIO_RAM_PTR(dev), (uintptr_t)&no_reg,
			   sizeof(struct ivshmem_reg), K_MEM_CACHE_NONE);
	} else {
		pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_MEM |
			     PCIE_CONF_CMDSTAT_MASTER, true);

		device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr,
			   mbar_regs.size, K_MEM_CACHE_NONE);
	}

	bool msi_x_bar_present = pcie_get_mbar(
		data->pcie->bdf, IVSHMEM_PCIE_MSI_X_BAR_IDX, &mbar_msi_x);
	bool shmem_bar_present = pcie_get_mbar(
		data->pcie->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_shmem);

	LOG_INF("MSI-X bar present: %s", msi_x_bar_present ? "yes" : "no");
	LOG_INF("SHMEM bar present: %s", shmem_bar_present ? "yes" : "no");

	uintptr_t shmem_phys_addr = mbar_shmem.phys_addr;

#ifdef CONFIG_IVSHMEM_V2
	if (data->ivshmem_v2) {
		if (mbar_regs.size < sizeof(struct ivshmem_v2_reg)) {
			LOG_ERR("Invalid ivshmem regs size %zu", mbar_regs.size);
			return false;
		}

		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *)DEVICE_MMIO_GET(dev);

		data->max_peers = regs->max_peers;
		if (!IN_RANGE(data->max_peers, 2, 0x10000)) {
			LOG_ERR("Invalid max peers %u", data->max_peers);
			return false;
		}

		uint32_t vendor_cap = pcie_get_cap(data->pcie->bdf, PCI_CAP_ID_VNDR);
		uint32_t cap_pos;

		if (!shmem_bar_present) {
			cap_pos = vendor_cap + IVSHMEM_CFG_ADDRESS / 4;
			shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		}

		cap_pos = vendor_cap + IVSHMEM_CFG_STATE_TAB_SZ / 4;
		size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos);

		LOG_INF("State table size 0x%zX", state_table_size);
		if (state_table_size < sizeof(uint32_t) * data->max_peers) {
			LOG_ERR("Invalid state table size %zu", state_table_size);
			return false;
		}

		cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
		data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		data->rw_section_offset = state_table_size;
		LOG_INF("RW section size 0x%zX", data->rw_section_size);

		cap_pos = vendor_cap + IVSHMEM_CFG_OUTPUT_SECTION_SZ / 4;
		data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		data->output_section_offset = data->rw_section_offset + data->rw_section_size;
		LOG_INF("Output section size 0x%zX", data->output_section_size);

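		/*
		 * v2 shared memory layout: the peer state table sits at
		 * offset 0, followed by the common read/write section,
		 * followed by one output section per peer.
		 */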
		data->size = data->output_section_offset +
			data->output_section_size * data->max_peers;

		/* Ensure one-shot ISR mode is disabled */
		cap_pos = vendor_cap + IVSHMEM_CFG_PRIV_CNTL / 4;
		uint32_t cfg_priv_cntl = pcie_conf_read(data->pcie->bdf, cap_pos);

		cfg_priv_cntl &= ~(IVSHMEM_PRIV_CNTL_ONESHOT_INT <<
				   ((IVSHMEM_CFG_PRIV_CNTL % 4) * 8));
		pcie_conf_write(data->pcie->bdf, cap_pos, cfg_priv_cntl);
	} else
#endif /* CONFIG_IVSHMEM_V2 */
	{
		if (!shmem_bar_present) {
			LOG_ERR("ivshmem mem bar not found");
			return false;
		}

		data->size = mbar_shmem.size;
	}

	z_phys_map((uint8_t **)&data->shmem,
		   shmem_phys_addr, data->size,
		   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);

	if (msi_x_bar_present) {
		if (!ivshmem_configure_msi_x_interrupts(dev)) {
			LOG_ERR("MSI-X init failed");
			return false;
		}
	}
#ifdef CONFIG_IVSHMEM_V2
	else if (data->ivshmem_v2) {
		if (!ivshmem_configure_int_x_interrupts(dev)) {
			LOG_ERR("INTx init failed");
			return false;
		}
	}
#endif

	LOG_INF("ivshmem configured:");
	LOG_INF("- Registers at 0x%lX (mapped to 0x%lX)",
		mbar_regs.phys_addr, DEVICE_MMIO_GET(dev));
	LOG_INF("- Shared memory of 0x%zX bytes at 0x%lX (mapped to 0x%lX)",
		data->size, shmem_phys_addr, data->shmem);

	return true;
}

static size_t ivshmem_api_get_mem(const struct device *dev,
				  uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	*memmap = data->shmem;

	return data->size;
}

static uint32_t ivshmem_api_get_id(const struct device *dev)
{
	uint32_t id;

#ifdef CONFIG_IVSHMEM_V2
	struct ivshmem *data = dev->data;

	if (data->ivshmem_v2) {
		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

		id = regs->id;
	} else
#endif
	{
		volatile struct ivshmem_reg *regs =
			(volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev);

		id = regs->iv_position;
	}

	return id;
}

static uint16_t ivshmem_api_get_vectors(const struct device *dev)
{
#ifdef CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	return data->n_vectors;
#else
	return 0;
#endif
}

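/*
 * Interrupt a peer by writing to the doorbell register. The value, as
 * generated by IVSHMEM_GEN_DOORBELL, encodes the destination: peer ID
 * in the upper 16 bits, vector number in the lower 16.
 */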
static int ivshmem_api_int_peer(const struct device *dev,
				uint32_t peer_id, uint16_t vector)
{
#ifdef CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;
	volatile uint32_t *doorbell_reg;
	uint32_t doorbell = IVSHMEM_GEN_DOORBELL(peer_id, vector);

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

#ifdef CONFIG_IVSHMEM_V2
	if (data->ivshmem_v2 && peer_id >= data->max_peers) {
		return -EINVAL;
	}

	if (data->ivshmem_v2) {
		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

		doorbell_reg = &regs->doorbell;
	} else
#endif
	{
		volatile struct ivshmem_reg *regs =
			(volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev);

		doorbell_reg = &regs->doorbell;
	}
	*doorbell_reg = doorbell;

	return 0;
#else
	return -ENOSYS;
#endif
}

static int ivshmem_api_register_handler(const struct device *dev,
					struct k_poll_signal *signal,
					uint16_t vector)
{
#ifdef CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

	register_signal(dev, signal, vector);

	return 0;
#else
	return -ENOSYS;
#endif
}

#ifdef CONFIG_IVSHMEM_V2

static size_t ivshmem_api_get_rw_mem_section(const struct device *dev,
					     uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		*memmap = 0;
		return 0;
	}

	*memmap = data->shmem + data->rw_section_offset;

	return data->rw_section_size;
}

static size_t ivshmem_api_get_output_mem_section(const struct device *dev,
						 uint32_t peer_id,
						 uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
		*memmap = 0;
		return 0;
	}

	*memmap = data->shmem + data->output_section_offset +
		data->output_section_size * peer_id;

	return data->output_section_size;
}

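/*
 * Peer states live in the state table at the very start of the shared
 * memory region, one 32-bit word per peer, indexed by peer ID.
 */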
static uint32_t ivshmem_api_get_state(const struct device *dev,
				      uint32_t peer_id)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
		return 0;
	}

	const volatile uint32_t *state_table =
		(const volatile uint32_t *)data->shmem;

	return state_table[peer_id];
}

static int ivshmem_api_set_state(const struct device *dev,
				 uint32_t state)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return -ENOSYS;
	}

	volatile struct ivshmem_v2_reg *regs =
		(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

	regs->state = state;

	return 0;
}

static uint32_t ivshmem_api_get_max_peers(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return 0;
	}

	return data->max_peers;
}

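/*
 * Per the ivshmem-v2 spec, the protocol ID is carried in the subclass
 * and programming-interface bytes of the PCI class code; shifting the
 * class/revision register right by 8 drops the revision byte and the
 * 16-bit mask keeps exactly those two bytes.
 */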
static uint16_t ivshmem_api_get_protocol(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return 0;
	}

	uint16_t protocol = (data->pcie->class_rev >> 8) & 0xFFFF;

	return protocol;
}

static int ivshmem_api_enable_interrupts(const struct device *dev,
					 bool enable)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return -ENOSYS;
	}

	volatile struct ivshmem_v2_reg *regs =
		(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

	regs->int_control = enable ? IVSHMEM_INT_ENABLE : 0;

	return 0;
}

#endif /* CONFIG_IVSHMEM_V2 */

static const struct ivshmem_driver_api ivshmem_api = {
	.get_mem = ivshmem_api_get_mem,
	.get_id = ivshmem_api_get_id,
	.get_vectors = ivshmem_api_get_vectors,
	.int_peer = ivshmem_api_int_peer,
	.register_handler = ivshmem_api_register_handler,
#ifdef CONFIG_IVSHMEM_V2
	.get_rw_mem_section = ivshmem_api_get_rw_mem_section,
	.get_output_mem_section = ivshmem_api_get_output_mem_section,
	.get_state = ivshmem_api_get_state,
	.set_state = ivshmem_api_set_state,
	.get_max_peers = ivshmem_api_get_max_peers,
	.get_protocol = ivshmem_api_get_protocol,
	.enable_interrupts = ivshmem_api_enable_interrupts
#endif
};
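
/*
 * Illustrative (untested) sketch of how an application might consume
 * this driver through the public wrappers in
 * <zephyr/drivers/virtualization/ivshmem.h>; `peer_id` is a placeholder
 * for a peer learned out of band:
 *
 *	const struct device *ivshmem = DEVICE_DT_GET_ONE(qemu_ivshmem);
 *	struct k_poll_signal sig;
 *	uintptr_t mem;
 *	size_t size;
 *
 *	size = ivshmem_get_mem(ivshmem, &mem);
 *	k_poll_signal_init(&sig);
 *	ivshmem_register_handler(ivshmem, &sig, 0);
 *	...
 *	ivshmem_int_peer(ivshmem, peer_id, 0);	// ring the peer's vector 0
 */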

static int ivshmem_init(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (data->pcie->bdf == PCIE_BDF_NONE) {
		LOG_WRN("ivshmem device not found");
		return -ENOTSUP;
	}

	LOG_INF("PCIe: ID 0x%08X, BDF 0x%X, class-rev 0x%08X",
		data->pcie->id, data->pcie->bdf, data->pcie->class_rev);

	if (!ivshmem_configure(dev)) {
		return -EIO;
	}
	return 0;
}

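/*
 * Expands to the IRQ/priority/flags triple for one legacy INTx pin,
 * taken from the instance's devicetree "interrupts" property; pins with
 * no devicetree entry are marked INTX_IRQ_UNUSED.
 */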
#define IVSHMEM_INTX_INFO(intx_idx, drv_idx) { \
	COND_CODE_1(DT_IRQ_HAS_IDX(DT_DRV_INST(drv_idx), intx_idx), \
		( \
			.irq = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, irq), \
			.priority = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, priority), \
			.flags = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, flags), \
		), \
		(.irq = INTX_IRQ_UNUSED)) \
	}

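/*
 * Per-instance device definition. The build-time asserts reject doorbell
 * configurations that provide neither MSI-X nor a v2 INTx route, and the
 * INTx table is populated for all four possible pins.
 */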
#define IVSHMEM_DEVICE_INIT(n) \
	BUILD_ASSERT(!IS_ENABLED(CONFIG_IVSHMEM_DOORBELL) || \
		     ((IS_ENABLED(CONFIG_PCIE_MSI_X) && \
		       IS_ENABLED(CONFIG_PCIE_MSI_MULTI_VECTOR)) || \
		      (DT_INST_PROP(n, ivshmem_v2) && \
		       DT_INST_NODE_HAS_PROP(n, interrupts))), \
		     "IVSHMEM_DOORBELL requires either MSI-X or INTx support"); \
	BUILD_ASSERT(IS_ENABLED(CONFIG_IVSHMEM_V2) || !DT_INST_PROP(n, ivshmem_v2), \
		     "CONFIG_IVSHMEM_V2 must be enabled for ivshmem-v2"); \
	DEVICE_PCIE_INST_DECLARE(n); \
	static struct ivshmem ivshmem_data_##n = { \
		DEVICE_PCIE_INST_INIT(n, pcie), \
		IF_ENABLED(CONFIG_IVSHMEM_V2, \
			   (.ivshmem_v2 = DT_INST_PROP(n, ivshmem_v2),)) \
	}; \
	IF_ENABLED(CONFIG_IVSHMEM_V2, ( \
		static struct ivshmem_cfg ivshmem_cfg_##n = { \
			.intx_info = \
			{ FOR_EACH_FIXED_ARG(IVSHMEM_INTX_INFO, (,), n, 0, 1, 2, 3) } \
		}; \
	)); \
	DEVICE_DT_INST_DEFINE(n, &ivshmem_init, NULL, \
			      &ivshmem_data_##n, \
			      COND_CODE_1(CONFIG_IVSHMEM_V2, (&ivshmem_cfg_##n), (NULL)), \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &ivshmem_api);

DT_INST_FOREACH_STATUS_OKAY(IVSHMEM_DEVICE_INIT)