Lines Matching +full:ivshmem +full:v2

 * SPDX-License-Identifier: Apache-2.0

LOG_MODULE_REGISTER(ivshmem);

#include <zephyr/drivers/virtualization/ivshmem.h>

In ivshmem_doorbell():

    LOG_DBG("Interrupt received on vector %u", param->vector);

    if (param->signal != NULL) {
        k_poll_signal_raise(param->signal, param->vector);

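The signal raised here is the whole of the ISR's work, so an application consumes doorbell notifications by handing the driver a k_poll_signal and waiting on it. A minimal consumer sketch, assuming the public ivshmem_register_handler() wrapper from ivshmem.h; the device pointer and the wait_for_doorbell() name are illustrative, not part of the driver:

#include <zephyr/kernel.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

static struct k_poll_signal doorbell_sig;

/* Block until a peer rings vector 0 of our doorbell. */
static void wait_for_doorbell(const struct device *ivshmem)
{
    struct k_poll_event ev = K_POLL_EVENT_INITIALIZER(
        K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &doorbell_sig);
    unsigned int signaled;
    int vector;

    k_poll_signal_init(&doorbell_sig);
    if (ivshmem_register_handler(ivshmem, &doorbell_sig, 0) < 0) {
        return;
    }

    k_poll(&ev, 1, K_FOREVER);
    k_poll_signal_check(&doorbell_sig, &signaled, &vector);
    /* vector holds the value the ISR passed to k_poll_signal_raise() */
    k_poll_signal_reset(&doorbell_sig);
}
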
In ivshmem_configure_msi_x_interrupts():

    struct ivshmem *data = dev->data;

    n_vectors = pcie_msi_vectors_allocate(data->pcie->bdf,
                                          data->vectors,

    LOG_ERR("Could not allocate %u MSI-X vectors",

    data->params[i].dev = dev;
    data->params[i].vector = i;

    if (!pcie_msi_vector_connect(data->pcie->bdf,
                                 &data->vectors[i],
                                 &data->params[i], 0)) {
        LOG_ERR("Failed to connect MSI-X vector %u", i);

    LOG_INF("%u MSI-X Vectors connected", n_vectors);

    if (!pcie_msi_enable(data->pcie->bdf, data->vectors, n_vectors, 0)) {
        LOG_ERR("Could not enable MSI-X");

    data->n_vectors = n_vectors;

    LOG_DBG("MSI-X configured");

In ivshmem_configure_int_x_interrupts():

    struct ivshmem *data = dev->data;
    const struct ivshmem_cfg *cfg = dev->config;
    uint32_t cfg_int = pcie_conf_read(data->pcie->bdf, PCIE_CONF_INTR);

    pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_INTX_DISABLE, false);

    const struct intx_info *intx = &cfg->intx_info[cfg_intx_pin - 1];

    data->params[0].dev = dev;
    data->params[0].vector = 0;

    LOG_INF("Enabling INTx IRQ %u (pin %u)", intx->irq, cfg_intx_pin);
    if (intx->irq == INTX_IRQ_UNUSED ||
            data->pcie->bdf, intx->irq, intx->priority,
            ivshmem_doorbell, &data->params[0], intx->flags)) {

    data->n_vectors = 1;

    pcie_irq_enable(data->pcie->bdf, intx->irq);

In register_signal():

    struct ivshmem *data = dev->data;

    data->params[vector].signal = signal;

In ivshmem_configure():

    struct ivshmem *data = dev->data;

    if (!pcie_get_mbar(data->pcie->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) {
            IF_ENABLED(CONFIG_IVSHMEM_V2, (|| data->ivshmem_v2))) {
            LOG_ERR("ivshmem regs bar not found");

        LOG_INF("ivshmem regs bar not found");

    pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_MEM |

            data->pcie->bdf, IVSHMEM_PCIE_MSI_X_BAR_IDX, &mbar_msi_x);
            data->pcie->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_shmem);

    LOG_INF("MSI-X bar present: %s", msi_x_bar_present ? "yes" : "no");

    if (data->ivshmem_v2) {
            LOG_ERR("Invalid ivshmem regs size %zu", mbar_regs.size);

        data->max_peers = regs->max_peers;
        if (!IN_RANGE(data->max_peers, 2, CONFIG_IVSHMEM_V2_MAX_PEERS)) {
            LOG_ERR("Invalid max peers %u", data->max_peers);

        uint32_t vendor_cap = pcie_get_cap(data->pcie->bdf, PCI_CAP_ID_VNDR);

        shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos);

        size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos);
        if (state_table_size < sizeof(uint32_t) * data->max_peers) {

        k_mem_map_phys_bare((uint8_t **)&data->state_table_shmem,

        data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
        LOG_INF("RW section size 0x%zX", data->rw_section_size);
        if (data->rw_section_size > 0) {
            k_mem_map_phys_bare((uint8_t **)&data->rw_section_shmem,
                                data->rw_section_size,

        data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
        size_t output_section_offset = rw_section_offset + data->rw_section_size;
        LOG_INF("Output section size 0x%zX", data->output_section_size);
        for (uint32_t i = 0; i < data->max_peers; i++) {
                        (data->output_section_size * i);
            if (i == regs->id) {

            k_mem_map_phys_bare((uint8_t **)&data->output_section_shmem[i],
                                phys_addr, data->output_section_size, flags);

        data->size = output_section_offset +
                     data->output_section_size * data->max_peers;

        /* Ensure one-shot ISR mode is disabled */
        uint32_t cfg_priv_cntl = pcie_conf_read(data->pcie->bdf, cap_pos);
        pcie_conf_write(data->pcie->bdf, cap_pos, cfg_priv_cntl);

        LOG_ERR("ivshmem mem bar not found");

        data->size = mbar_shmem.size;

    k_mem_map_phys_bare((uint8_t **)&data->shmem,
                        shmem_phys_addr, data->size,

        LOG_ERR("MSI-X init failed");
    else if (data->ivshmem_v2) {

    LOG_INF("ivshmem configured:");
    LOG_INF("- Registers at 0x%lX (mapped to 0x%lX)",

    LOG_INF("- Shared memory of 0x%zX bytes at 0x%lX (mapped to 0x%lX)",
            data->size, shmem_phys_addr, data->shmem);

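The size stored above is plain arithmetic over the vendor-capability fields: the output sections follow the read/write section, one per peer. A small helper that mirrors that calculation; the names are illustrative, and it assumes (as the ivshmem-v2 layout suggests, though the corresponding line is not in this excerpt) that the read/write section starts right after the state table:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: mirrors the ivshmem-v2 size arithmetic used above.
 * Peer i's output section starts at output_offset + i * output_section_size.
 */
struct ivshmem_v2_layout {
    size_t rw_offset;     /* read/write section, assumed to follow the state table */
    size_t output_offset; /* peer 0's output section */
    size_t total_size;    /* what the driver stores in data->size */
};

static struct ivshmem_v2_layout
compute_v2_layout(size_t state_table_size, size_t rw_section_size,
                  size_t output_section_size, uint32_t max_peers)
{
    struct ivshmem_v2_layout l;

    l.rw_offset = state_table_size;
    l.output_offset = l.rw_offset + rw_section_size;
    l.total_size = l.output_offset + output_section_size * max_peers;
    return l;
}
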
In ivshmem_api_get_mem():

    struct ivshmem *data = dev->data;

    if (data->ivshmem_v2) {

    *memmap = data->shmem;

    return data->size;

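Applications reach the region mapped above through the ivshmem_get_mem() wrapper, which reports the size and hands back the virtual address. A minimal sketch; show_shmem() and the device pointer are illustrative:

#include <zephyr/kernel.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

static void show_shmem(const struct device *ivshmem)
{
    uintptr_t mem;
    size_t size = ivshmem_get_mem(ivshmem, &mem);

    if (size == 0) {
        printk("no shared memory exposed\n");
        return;
    }

    printk("ivshmem: %zu bytes mapped at %p\n", size, (void *)mem);
}
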
In ivshmem_api_get_id():

    struct ivshmem *data = dev->data;

    if (data->ivshmem_v2) {
        id = regs->id;

        id = regs->iv_position;

In ivshmem_api_get_vectors():

    struct ivshmem *data = dev->data;

    return data->n_vectors;

In ivshmem_api_int_peer():

    struct ivshmem *data = dev->data;

    if (vector >= data->n_vectors) {
        return -EINVAL;

    if (data->ivshmem_v2 && peer_id >= data->max_peers) {
        return -EINVAL;

    if (data->ivshmem_v2) {
        doorbell_reg = &regs->doorbell;

        doorbell_reg = &regs->doorbell;

    return -ENOSYS;

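The doorbell write selected above is what the public ivshmem_int_peer() call boils down to; a sender only needs the target peer ID and a vector index below the count reported by ivshmem_get_vectors(). A sketch, with the peer ID assumed to be known out of band and notify_peer() being an illustrative name:

#include <errno.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

/* Ring vector 0 of a peer's doorbell, if any vectors were connected. */
static int notify_peer(const struct device *ivshmem, uint32_t peer_id)
{
    if (ivshmem_get_vectors(ivshmem) == 0) {
        return -ENOTSUP;
    }

    return ivshmem_int_peer(ivshmem, peer_id, 0);
}
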
In ivshmem_api_register_handler():

    struct ivshmem *data = dev->data;

    if (vector >= data->n_vectors) {
        return -EINVAL;

    return -ENOSYS;

In ivshmem_api_get_rw_mem_section():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2) {

    *memmap = data->rw_section_shmem;

    return data->rw_section_size;

In ivshmem_api_get_output_mem_section():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2 || peer_id >= data->max_peers) {

    *memmap = data->output_section_shmem[peer_id];

    return data->output_section_size;

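Assuming the matching v2 wrappers declared alongside this driver, ivshmem_get_rw_mem_section() and ivshmem_get_output_mem_section(), application code reaches the per-device sections as sketched below; show_v2_sections() is an illustrative name:

#include <zephyr/kernel.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

static void show_v2_sections(const struct device *ivshmem, uint32_t peer_id)
{
    uintptr_t rw, out;
    size_t rw_size = ivshmem_get_rw_mem_section(ivshmem, &rw);
    size_t out_size = ivshmem_get_output_mem_section(ivshmem, peer_id, &out);

    /* Both return 0 when the corresponding section is not present. */
    printk("RW section: %zu bytes at %p\n", rw_size, (void *)rw);
    printk("Output section of peer %u: %zu bytes at %p\n",
           peer_id, out_size, (void *)out);
}
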
In ivshmem_api_get_state():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2 || peer_id >= data->max_peers) {

        (const volatile uint32_t *)data->state_table_shmem;

In ivshmem_api_set_state():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2) {
        return -ENOSYS;

    regs->state = state;

In ivshmem_api_get_max_peers():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2) {

    return data->max_peers;

In ivshmem_api_get_protocol():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2) {

    uint16_t protocol = (data->pcie->class_rev >> 8) & 0xFFFF;

In ivshmem_api_enable_interrupts():

    struct ivshmem *data = dev->data;

    if (!data->ivshmem_v2) {
        return -ENOSYS;

    regs->int_control = enable ? IVSHMEM_INT_ENABLE : 0;

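ivshmem-v2 peers coordinate through the state table and the interrupt-enable bit written above. A sketch of a typical start-up sequence, assuming the v2 wrappers ivshmem_enable_interrupts(), ivshmem_set_state() and ivshmem_get_state(); STATE_READY and announce_ready() are application-side inventions, not part of the driver:

#include <errno.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

#define STATE_READY 1   /* protocol-specific value chosen by the application */

static int announce_ready(const struct device *ivshmem, uint32_t peer_id)
{
    int ret;

    /* Unmask the device-level interrupt before peers start ringing us. */
    ret = ivshmem_enable_interrupts(ivshmem, true);
    if (ret < 0) {
        return ret;
    }

    /* Publish our state word; peers read it back from the state table. */
    ret = ivshmem_set_state(ivshmem, STATE_READY);
    if (ret < 0) {
        return ret;
    }

    /* Check the peer's slot in the state table once. */
    return (ivshmem_get_state(ivshmem, peer_id) == STATE_READY) ? 0 : -EAGAIN;
}
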
static DEVICE_API(ivshmem, ivshmem_api) = {

In ivshmem_init():

    struct ivshmem *data = dev->data;

    if (data->pcie->bdf == PCIE_BDF_NONE) {
        LOG_WRN("ivshmem device not found");
        return -ENOTSUP;

    LOG_INF("PCIe: ID 0x%08X, BDF 0x%X, class-rev 0x%08X",
            data->pcie->id, data->pcie->bdf, data->pcie->class_rev);

    return -EIO;

    "IVSHMEM_DOORBELL requires either MSI-X or INTx support"); \
    "CONFIG_IVSHMEM_V2 must be enabled for ivshmem-v2"); \
static struct ivshmem ivshmem_data_##n = { \
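
Everything above hangs off the struct device that this per-instance macro creates; at run time an application only fetches it and checks that ivshmem_init() succeeded. A sketch, assuming the node uses a qemu_ivshmem compatible (adjust to whatever the board's devicetree actually provides):

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/virtualization/ivshmem.h>

int main(void)
{
    /* DEVICE_DT_GET_ONE() resolves at build time; the compatible here is an
     * assumption and must match the devicetree node for the ivshmem device.
     */
    const struct device *ivshmem = DEVICE_DT_GET_ONE(qemu_ivshmem);

    if (!device_is_ready(ivshmem)) {
        printk("ivshmem device not ready\n");
        return -ENODEV;
    }

    printk("ivshmem ID %u, %u doorbell vector(s)\n",
           ivshmem_get_id(ivshmem), ivshmem_get_vectors(ivshmem));
    return 0;
}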