/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT qemu_ivshmem

#define LOG_LEVEL CONFIG_IVSHMEM_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(ivshmem);

#include <errno.h>

#include <zephyr/kernel.h>

#include <zephyr/device.h>
#include <zephyr/drivers/pcie/cap.h>
#include <zephyr/init.h>

#include <zephyr/drivers/virtualization/ivshmem.h>
#include "virt_ivshmem.h"

#ifdef CONFIG_IVSHMEM_DOORBELL

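/*
 * Doorbell ISR: invoked whenever a peer rings one of our vectors.
 * Raises the k_poll_signal registered for that vector, if any, with
 * the vector number as the signal result.
 */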
static void ivshmem_doorbell(const void *arg)
{
	const struct ivshmem_param *param = arg;

	LOG_DBG("Interrupt received on vector %u", param->vector);

	if (param->signal != NULL) {
		k_poll_signal_raise(param->signal, param->vector);
	}
}

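/*
 * Allocate up to CONFIG_IVSHMEM_MSI_X_VECTORS MSI-X vectors, connect
 * each one to the doorbell ISR and enable MSI-X on the device.
 * Returns false when MSI-X multi-vector support is not compiled in.
 */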
static bool ivshmem_configure_msi_x_interrupts(const struct device *dev)
{
#if defined(CONFIG_PCIE_MSI_X) && defined(CONFIG_PCIE_MSI_MULTI_VECTOR)
	struct ivshmem *data = dev->data;
	bool ret = false;
	uint8_t n_vectors;
	uint32_t key;
	int i;

	key = irq_lock();

	n_vectors = pcie_msi_vectors_allocate(data->pcie->bdf,
					      CONFIG_IVSHMEM_INT_PRIORITY,
					      data->vectors,
					      CONFIG_IVSHMEM_MSI_X_VECTORS);
	if (n_vectors == 0) {
		LOG_ERR("Could not allocate %u MSI-X vectors",
			CONFIG_IVSHMEM_MSI_X_VECTORS);
		goto out;
	}

	LOG_DBG("Allocated %u vectors", n_vectors);

	for (i = 0; i < n_vectors; i++) {
		data->params[i].dev = dev;
		data->params[i].vector = i;

		if (!pcie_msi_vector_connect(data->pcie->bdf,
					     &data->vectors[i],
					     ivshmem_doorbell,
					     &data->params[i], 0)) {
			LOG_ERR("Failed to connect MSI-X vector %u", i);
			goto out;
		}
	}

	LOG_INF("%u MSI-X Vectors connected", n_vectors);

	if (!pcie_msi_enable(data->pcie->bdf, data->vectors, n_vectors, 0)) {
		LOG_ERR("Could not enable MSI-X");
		goto out;
	}

	data->n_vectors = n_vectors;
	ret = true;

	LOG_DBG("MSI-X configured");
out:
	irq_unlock(key);

	return ret;
#else
	return false;
#endif
}

#ifdef CONFIG_IVSHMEM_V2
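/*
 * Legacy INTx fallback (ivshmem-v2 only): read the INTx pin from PCI
 * config space, connect the matching devicetree IRQ to the doorbell
 * ISR and enable it. Only a single vector (0) is available in this mode.
 */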
static bool ivshmem_configure_int_x_interrupts(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	const struct ivshmem_cfg *cfg = dev->config;
	uint32_t cfg_int = pcie_conf_read(data->pcie->bdf, PCIE_CONF_INTR);
	uint32_t cfg_intx_pin = PCIE_CONF_INTR_PIN(cfg_int);

	if (!IN_RANGE(cfg_intx_pin, PCIE_INTX_PIN_MIN, PCIE_INTX_PIN_MAX)) {
		LOG_ERR("Invalid INTx pin %u", cfg_intx_pin);
		return false;
	}

	/* Ensure INTx is enabled */
	pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_INTX_DISABLE, false);

	const struct intx_info *intx = &cfg->intx_info[cfg_intx_pin - 1];

	data->params[0].dev = dev;
	data->params[0].vector = 0;

	LOG_INF("Enabling INTx IRQ %u (pin %u)", intx->irq, cfg_intx_pin);
	if (intx->irq == INTX_IRQ_UNUSED ||
	    !pcie_connect_dynamic_irq(
		data->pcie->bdf, intx->irq, intx->priority,
		ivshmem_doorbell, &data->params[0], intx->flags)) {
		LOG_ERR("Failed to connect INTx ISR %u", cfg_intx_pin);
		return false;
	}

	data->n_vectors = 1;

	pcie_irq_enable(data->pcie->bdf, intx->irq);

	return true;
}
#endif /* CONFIG_IVSHMEM_V2 */

static void register_signal(const struct device *dev,
			    struct k_poll_signal *signal,
			    uint16_t vector)
{
	struct ivshmem *data = dev->data;

	data->params[vector].signal = signal;
}

#else

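/* Without doorbell support, interrupt setup trivially succeeds and
 * signal registration is a no-op.
 */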
#define ivshmem_configure_msi_x_interrupts(...) true
#define ivshmem_configure_int_x_interrupts(...) true
#define register_signal(...)

#endif /* CONFIG_IVSHMEM_DOORBELL */

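/* Zero-filled dummy register block, mapped when the device exposes no
 * register BAR (plain ivshmem without doorbell support).
 */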
static const struct ivshmem_reg no_reg;

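/* Read a 64-bit value from two consecutive 32-bit PCI config words;
 * reg is expressed in 32-bit word units.
 */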
__maybe_unused static uint64_t pcie_conf_read_u64(pcie_bdf_t bdf, unsigned int reg)
{
	uint64_t lo = pcie_conf_read(bdf, reg);
	uint64_t hi = pcie_conf_read(bdf, reg + 1);

	return hi << 32 | lo;
}

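/*
 * Map the register BAR (or the dummy block when absent), locate and
 * map the shared memory regions, then set up MSI-X or INTx doorbell
 * interrupts as available.
 */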
static bool ivshmem_configure(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	struct pcie_bar mbar_regs, mbar_msi_x, mbar_shmem;

	if (!pcie_get_mbar(data->pcie->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) {
		if (IS_ENABLED(CONFIG_IVSHMEM_DOORBELL)
		    IF_ENABLED(CONFIG_IVSHMEM_V2, (|| data->ivshmem_v2))) {
			LOG_ERR("ivshmem regs bar not found");
			return false;
		}
		LOG_INF("ivshmem regs bar not found");
		device_map(DEVICE_MMIO_RAM_PTR(dev), (uintptr_t)&no_reg,
			   sizeof(struct ivshmem_reg), K_MEM_CACHE_NONE);
	} else {
		pcie_set_cmd(data->pcie->bdf, PCIE_CONF_CMDSTAT_MEM |
			     PCIE_CONF_CMDSTAT_MASTER, true);

		device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr,
			   mbar_regs.size, K_MEM_CACHE_NONE);
	}

	bool msi_x_bar_present = pcie_get_mbar(
		data->pcie->bdf, IVSHMEM_PCIE_MSI_X_BAR_IDX, &mbar_msi_x);
	bool shmem_bar_present = pcie_get_mbar(
		data->pcie->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_shmem);

	LOG_INF("MSI-X bar present: %s", msi_x_bar_present ? "yes" : "no");
	LOG_INF("SHMEM bar present: %s", shmem_bar_present ? "yes" : "no");

	uintptr_t shmem_phys_addr = mbar_shmem.phys_addr;

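	/*
	 * ivshmem-v2 splits the shared memory into a read-only state
	 * table, an optional R/W section and one output section per
	 * peer; only our own output section is mapped writable.
	 * Section sizes come from the vendor-specific PCI capability.
	 */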
#ifdef CONFIG_IVSHMEM_V2
	if (data->ivshmem_v2) {
		if (mbar_regs.size < sizeof(struct ivshmem_v2_reg)) {
			LOG_ERR("Invalid ivshmem regs size %zu", mbar_regs.size);
			return false;
		}

		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *)DEVICE_MMIO_GET(dev);

		data->max_peers = regs->max_peers;
		if (!IN_RANGE(data->max_peers, 2, CONFIG_IVSHMEM_V2_MAX_PEERS)) {
			LOG_ERR("Invalid max peers %u", data->max_peers);
			return false;
		}

		uint32_t vendor_cap = pcie_get_cap(data->pcie->bdf, PCI_CAP_ID_VNDR);
		uint32_t cap_pos;

		if (!shmem_bar_present) {
			cap_pos = vendor_cap + IVSHMEM_CFG_ADDRESS / 4;
			shmem_phys_addr = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		}

		/* State table R/O */
		cap_pos = vendor_cap + IVSHMEM_CFG_STATE_TAB_SZ / 4;
		size_t state_table_size = pcie_conf_read(data->pcie->bdf, cap_pos);
		LOG_INF("State table size 0x%zX", state_table_size);
		if (state_table_size < sizeof(uint32_t) * data->max_peers) {
			LOG_ERR("Invalid state table size %zu", state_table_size);
			return false;
		}
		k_mem_map_phys_bare((uint8_t **)&data->state_table_shmem,
				    shmem_phys_addr, state_table_size,
				    K_MEM_CACHE_WB | K_MEM_PERM_USER);

		/* R/W section (optional) */
		cap_pos = vendor_cap + IVSHMEM_CFG_RW_SECTION_SZ / 4;
		data->rw_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		size_t rw_section_offset = state_table_size;
		LOG_INF("RW section size 0x%zX", data->rw_section_size);
		if (data->rw_section_size > 0) {
			k_mem_map_phys_bare((uint8_t **)&data->rw_section_shmem,
					    shmem_phys_addr + rw_section_offset,
					    data->rw_section_size,
					    K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
		}

		/* Output sections */
		cap_pos = vendor_cap + IVSHMEM_CFG_OUTPUT_SECTION_SZ / 4;
		data->output_section_size = pcie_conf_read_u64(data->pcie->bdf, cap_pos);
		size_t output_section_offset = rw_section_offset + data->rw_section_size;
		LOG_INF("Output section size 0x%zX", data->output_section_size);
		for (uint32_t i = 0; i < data->max_peers; i++) {
			uintptr_t phys_addr = shmem_phys_addr +
					      output_section_offset +
					      (data->output_section_size * i);
			uint32_t flags = K_MEM_CACHE_WB | K_MEM_PERM_USER;

			/* Only your own output section is R/W */
			if (i == regs->id) {
				flags |= K_MEM_PERM_RW;
			}
			k_mem_map_phys_bare((uint8_t **)&data->output_section_shmem[i],
					    phys_addr, data->output_section_size, flags);
		}

		data->size = output_section_offset +
			     data->output_section_size * data->max_peers;

		/* Ensure one-shot ISR mode is disabled */
		cap_pos = vendor_cap + IVSHMEM_CFG_PRIV_CNTL / 4;
		uint32_t cfg_priv_cntl = pcie_conf_read(data->pcie->bdf, cap_pos);

		cfg_priv_cntl &= ~(IVSHMEM_PRIV_CNTL_ONESHOT_INT <<
				   ((IVSHMEM_CFG_PRIV_CNTL % 4) * 8));
		pcie_conf_write(data->pcie->bdf, cap_pos, cfg_priv_cntl);
	} else
#endif /* CONFIG_IVSHMEM_V2 */
	{
		if (!shmem_bar_present) {
			LOG_ERR("ivshmem mem bar not found");
			return false;
		}

		data->size = mbar_shmem.size;

		k_mem_map_phys_bare((uint8_t **)&data->shmem,
				    shmem_phys_addr, data->size,
				    K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);
	}

	if (msi_x_bar_present) {
		if (!ivshmem_configure_msi_x_interrupts(dev)) {
			LOG_ERR("MSI-X init failed");
			return false;
		}
	}
#ifdef CONFIG_IVSHMEM_V2
	else if (data->ivshmem_v2) {
		if (!ivshmem_configure_int_x_interrupts(dev)) {
			LOG_ERR("INTx init failed");
			return false;
		}
	}
#endif

	LOG_INF("ivshmem configured:");
	LOG_INF("- Registers at 0x%lX (mapped to 0x%lX)",
		mbar_regs.phys_addr, DEVICE_MMIO_GET(dev));
	LOG_INF("- Shared memory of 0x%zX bytes at 0x%lX (mapped to 0x%lX)",
		data->size, shmem_phys_addr, data->shmem);

	return true;
}

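/* For ivshmem-v2 this returns 0: memory is exposed through the
 * dedicated R/W and output section getters instead.
 */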
static size_t ivshmem_api_get_mem(const struct device *dev,
				  uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

#ifdef CONFIG_IVSHMEM_V2
	if (data->ivshmem_v2) {
		*memmap = 0;
		return 0;
	}
#endif

	*memmap = data->shmem;

	return data->size;
}

static uint32_t ivshmem_api_get_id(const struct device *dev)
{
	uint32_t id;

#ifdef CONFIG_IVSHMEM_V2
	struct ivshmem *data = dev->data;

	if (data->ivshmem_v2) {
		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

		id = regs->id;
	} else
#endif
	{
		volatile struct ivshmem_reg *regs =
			(volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev);

		id = regs->iv_position;
	}

	return id;
}

static uint16_t ivshmem_api_get_vectors(const struct device *dev)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	return data->n_vectors;
#else
	return 0;
#endif
}

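/* Notify a peer by writing IVSHMEM_GEN_DOORBELL(peer_id, vector) to
 * the doorbell register.
 */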
static int ivshmem_api_int_peer(const struct device *dev,
				uint32_t peer_id, uint16_t vector)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;
	volatile uint32_t *doorbell_reg;
	uint32_t doorbell = IVSHMEM_GEN_DOORBELL(peer_id, vector);

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

#ifdef CONFIG_IVSHMEM_V2
	if (data->ivshmem_v2 && peer_id >= data->max_peers) {
		return -EINVAL;
	}

	if (data->ivshmem_v2) {
		volatile struct ivshmem_v2_reg *regs =
			(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

		doorbell_reg = &regs->doorbell;
	} else
#endif
	{
		volatile struct ivshmem_reg *regs =
			(volatile struct ivshmem_reg *) DEVICE_MMIO_GET(dev);

		doorbell_reg = &regs->doorbell;
	}
	*doorbell_reg = doorbell;

	return 0;
#else
	return -ENOSYS;
#endif
}

static int ivshmem_api_register_handler(const struct device *dev,
					struct k_poll_signal *signal,
					uint16_t vector)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

	register_signal(dev, signal, vector);

	return 0;
#else
	return -ENOSYS;
#endif
}

#ifdef CONFIG_IVSHMEM_V2

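/* ivshmem-v2 only API handlers: section getters, peer state table
 * access and interrupt control. All of them bail out when the device
 * is a plain (v1) ivshmem.
 */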
static size_t ivshmem_api_get_rw_mem_section(const struct device *dev,
					     uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		*memmap = 0;
		return 0;
	}

	*memmap = data->rw_section_shmem;

	return data->rw_section_size;
}

static size_t ivshmem_api_get_output_mem_section(const struct device *dev,
						 uint32_t peer_id,
						 uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
		*memmap = 0;
		return 0;
	}

	*memmap = data->output_section_shmem[peer_id];

	return data->output_section_size;
}

static uint32_t ivshmem_api_get_state(const struct device *dev,
				      uint32_t peer_id)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2 || peer_id >= data->max_peers) {
		return 0;
	}

	const volatile uint32_t *state_table =
		(const volatile uint32_t *)data->state_table_shmem;

	return state_table[peer_id];
}

static int ivshmem_api_set_state(const struct device *dev,
				 uint32_t state)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return -ENOSYS;
	}

	volatile struct ivshmem_v2_reg *regs =
		(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

	regs->state = state;

	return 0;
}

static uint32_t ivshmem_api_get_max_peers(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return 0;
	}

	return data->max_peers;
}

static uint16_t ivshmem_api_get_protocol(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return 0;
	}

	uint16_t protocol = (data->pcie->class_rev >> 8) & 0xFFFF;

	return protocol;
}

static int ivshmem_api_enable_interrupts(const struct device *dev,
					 bool enable)
{
	struct ivshmem *data = dev->data;

	if (!data->ivshmem_v2) {
		return -ENOSYS;
	}

	volatile struct ivshmem_v2_reg *regs =
		(volatile struct ivshmem_v2_reg *) DEVICE_MMIO_GET(dev);

	regs->int_control = enable ? IVSHMEM_INT_ENABLE : 0;

	return 0;
}

#endif /* CONFIG_IVSHMEM_V2 */

static const struct ivshmem_driver_api ivshmem_api = {
	.get_mem = ivshmem_api_get_mem,
	.get_id = ivshmem_api_get_id,
	.get_vectors = ivshmem_api_get_vectors,
	.int_peer = ivshmem_api_int_peer,
	.register_handler = ivshmem_api_register_handler,
#ifdef CONFIG_IVSHMEM_V2
	.get_rw_mem_section = ivshmem_api_get_rw_mem_section,
	.get_output_mem_section = ivshmem_api_get_output_mem_section,
	.get_state = ivshmem_api_get_state,
	.set_state = ivshmem_api_set_state,
	.get_max_peers = ivshmem_api_get_max_peers,
	.get_protocol = ivshmem_api_get_protocol,
	.enable_interrupts = ivshmem_api_enable_interrupts
#endif
};

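/* Driver init: bail out if the PCIe device was not enumerated,
 * otherwise map and configure it.
 */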
static int ivshmem_init(const struct device *dev)
{
	struct ivshmem *data = dev->data;

	if (data->pcie->bdf == PCIE_BDF_NONE) {
		LOG_WRN("ivshmem device not found");
		return -ENOTSUP;
	}

	LOG_INF("PCIe: ID 0x%08X, BDF 0x%X, class-rev 0x%08X",
		data->pcie->id, data->pcie->bdf, data->pcie->class_rev);

	if (!ivshmem_configure(dev)) {
		return -EIO;
	}
	return 0;
}

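/* Build one intx_info entry from the devicetree interrupts specifier
 * at index intx_idx, or mark the slot unused when the instance has no
 * IRQ at that index.
 */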
#define IVSHMEM_INTX_INFO(intx_idx, drv_idx) { \
	COND_CODE_1(DT_IRQ_HAS_IDX(DT_DRV_INST(drv_idx), intx_idx), \
		( \
			.irq = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, irq), \
			.priority = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, priority), \
			.flags = DT_IRQ_BY_IDX(DT_DRV_INST(drv_idx), intx_idx, flags), \
		), \
		(.irq = INTX_IRQ_UNUSED)) \
	}

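/* Per-instance device definition: compile-time sanity checks, PCIe
 * declaration, driver data/config and the device itself.
 */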
#define IVSHMEM_DEVICE_INIT(n) \
	BUILD_ASSERT(!IS_ENABLED(CONFIG_IVSHMEM_DOORBELL) || \
		     ((IS_ENABLED(CONFIG_PCIE_MSI_X) && \
		       IS_ENABLED(CONFIG_PCIE_MSI_MULTI_VECTOR)) || \
		      (DT_INST_PROP(n, ivshmem_v2) && \
		       DT_INST_NODE_HAS_PROP(n, interrupts))), \
		     "IVSHMEM_DOORBELL requires either MSI-X or INTx support"); \
	BUILD_ASSERT(IS_ENABLED(CONFIG_IVSHMEM_V2) || !DT_INST_PROP(n, ivshmem_v2), \
		     "CONFIG_IVSHMEM_V2 must be enabled for ivshmem-v2"); \
	DEVICE_PCIE_INST_DECLARE(n); \
	static struct ivshmem ivshmem_data_##n = { \
		DEVICE_PCIE_INST_INIT(n, pcie), \
		IF_ENABLED(CONFIG_IVSHMEM_V2, \
			   (.ivshmem_v2 = DT_INST_PROP(n, ivshmem_v2),)) \
	}; \
	IF_ENABLED(CONFIG_IVSHMEM_V2, ( \
		static struct ivshmem_cfg ivshmem_cfg_##n = { \
			.intx_info = \
			{ FOR_EACH_FIXED_ARG(IVSHMEM_INTX_INFO, (,), n, 0, 1, 2, 3) } \
		}; \
	)); \
	DEVICE_DT_INST_DEFINE(n, &ivshmem_init, NULL, \
			      &ivshmem_data_##n, \
			      COND_CODE_1(CONFIG_IVSHMEM_V2, (&ivshmem_cfg_##n), (NULL)), \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &ivshmem_api);

DT_INST_FOREACH_STATUS_OKAY(IVSHMEM_DEVICE_INIT)