/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT qemu_ivshmem

#define LOG_LEVEL CONFIG_IVSHMEM_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(ivshmem);

#include <errno.h>

#include <kernel.h>
#include <arch/cpu.h>

#include <soc.h>
#include <device.h>
#include <init.h>

#include <drivers/virtualization/ivshmem.h>
#include "virt_ivshmem.h"

#ifdef CONFIG_IVSHMEM_DOORBELL

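/*
 * MSI-X interrupt handler: one instance is connected per allocated vector.
 * It raises the k_poll_signal an application registered for that vector
 * (if any), passing the vector number as the signal result.
 */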
static void ivshmem_doorbell(const void *arg)
{
	const struct ivshmem_param *param = arg;

	LOG_DBG("Interrupt received on vector %u", param->vector);

	if (param->signal != NULL) {
		k_poll_signal_raise(param->signal, param->vector);
	}
}

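/*
 * Allocate, connect and enable up to CONFIG_IVSHMEM_MSI_X_VECTORS MSI-X
 * vectors for the doorbell feature. The whole sequence runs with interrupts
 * locked; on success data->n_vectors holds the number of usable vectors.
 */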
static bool ivshmem_configure_interrupts(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	bool ret = false;
	uint8_t n_vectors;
	uint32_t key;
	int i;

	key = irq_lock();

	n_vectors = pcie_msi_vectors_allocate(data->bdf,
					      CONFIG_IVSHMEM_INT_PRIORITY,
					      data->vectors,
					      CONFIG_IVSHMEM_MSI_X_VECTORS);
	if (n_vectors == 0) {
		LOG_ERR("Could not allocate %u MSI-X vectors",
			CONFIG_IVSHMEM_MSI_X_VECTORS);
		goto out;
	}

	LOG_DBG("Allocated %u vectors", n_vectors);

	for (i = 0; i < n_vectors; i++) {
		data->params[i].dev = dev;
		data->params[i].vector = i;

		if (!pcie_msi_vector_connect(data->bdf,
					     &data->vectors[i],
					     ivshmem_doorbell,
					     &data->params[i], 0)) {
			LOG_ERR("Failed to connect MSI-X vector %u", i);
			goto out;
		}
	}

	LOG_DBG("%u MSI-X Vectors connected", n_vectors);

	if (!pcie_msi_enable(data->bdf, data->vectors, n_vectors, 0)) {
		LOG_ERR("Could not enable MSI-X");
		goto out;
	}

	data->n_vectors = n_vectors;
	ret = true;

	LOG_DBG("MSI-X configured");
out:
	irq_unlock(key);

	return ret;
}

static void register_signal(const struct device *dev,
			    struct k_poll_signal *signal,
			    uint16_t vector)
{
	struct ivshmem *data = dev->data;

	data->params[vector].signal = signal;
}

#else

static const struct ivshmem_reg no_reg;

#define ivshmem_configure_interrupts(...) true
#define register_signal(...)

#endif /* CONFIG_IVSHMEM_DOORBELL */

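/*
 * Map the device: the register BAR goes through device_map() (uncached),
 * falling back to a zero-filled dummy register block when doorbell support
 * is disabled and the BAR is absent, while the shared-memory BAR is mapped
 * write-back, read/write and user-accessible via z_phys_map().
 */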
static bool ivshmem_configure(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	struct pcie_mbar mbar_regs, mbar_mem;

	if (!pcie_get_mbar(data->bdf, IVSHMEM_PCIE_REG_BAR_IDX, &mbar_regs)) {
#ifdef CONFIG_IVSHMEM_DOORBELL
		LOG_ERR("ivshmem regs bar not found");
		return false;
#else
		LOG_DBG("ivshmem regs bar not found");
		device_map(DEVICE_MMIO_RAM_PTR(dev), (uintptr_t)&no_reg,
			   sizeof(struct ivshmem_reg), K_MEM_CACHE_NONE);
#endif /* CONFIG_IVSHMEM_DOORBELL */
	} else {
		pcie_set_cmd(data->bdf, PCIE_CONF_CMDSTAT_MEM, true);

		device_map(DEVICE_MMIO_RAM_PTR(dev), mbar_regs.phys_addr,
			   mbar_regs.size, K_MEM_CACHE_NONE);
	}

	if (!pcie_get_mbar(data->bdf, IVSHMEM_PCIE_SHMEM_BAR_IDX, &mbar_mem)) {
		LOG_ERR("ivshmem mem bar not found");
		return false;
	}

	data->size = mbar_mem.size;

	z_phys_map((uint8_t **)&data->shmem,
		   mbar_mem.phys_addr, data->size,
		   K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER);

	LOG_DBG("ivshmem configured:");
	LOG_DBG("- Registers at 0x%lx (mapped to 0x%lx)",
		mbar_regs.phys_addr, DEVICE_MMIO_GET(dev));
	LOG_DBG("- Shared memory of %lu bytes at 0x%lx (mapped to 0x%lx)",
		data->size, mbar_mem.phys_addr, data->shmem);

	return ivshmem_configure_interrupts(dev);
}

static size_t ivshmem_api_get_mem(const struct device *dev,
				  uintptr_t *memmap)
{
	struct ivshmem *data = dev->data;

	*memmap = data->shmem;

	return data->size;
}

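/*
 * Per the QEMU ivshmem spec, the IVPosition register holds the peer ID
 * assigned by the ivshmem server, and reads as 0 on devices without
 * doorbell/peer support.
 */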
static uint32_t ivshmem_api_get_id(const struct device *dev)
{
	struct ivshmem_reg *regs = (struct ivshmem_reg *)DEVICE_MMIO_GET(dev);

	return regs->iv_position;
}

static uint16_t ivshmem_api_get_vectors(const struct device *dev)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	return data->n_vectors;
#else
	return 0;
#endif
}

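/*
 * Ring a peer's doorbell: IVSHMEM_GEN_DOORBELL() (see virt_ivshmem.h) packs
 * the destination peer ID and the local vector number into the 32-bit value
 * written to the Doorbell register.
 */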
static int ivshmem_api_int_peer(const struct device *dev,
				uint32_t peer_id, uint16_t vector)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem_reg *regs = (struct ivshmem_reg *)DEVICE_MMIO_GET(dev);
	struct ivshmem *data = dev->data;
	uint32_t doorbell;

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

	doorbell = IVSHMEM_GEN_DOORBELL(peer_id, vector);
	regs->doorbell = doorbell;

	return 0;
#else
	return -ENOSYS;
#endif
}

static int ivshmem_api_register_handler(const struct device *dev,
					struct k_poll_signal *signal,
					uint16_t vector)
{
#if CONFIG_IVSHMEM_DOORBELL
	struct ivshmem *data = dev->data;

	if (vector >= data->n_vectors) {
		return -EINVAL;
	}

	register_signal(dev, signal, vector);

	return 0;
#else
	return -ENOSYS;
#endif
}

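/*
 * Illustrative application-side sketch (not part of this driver), assuming a
 * devicetree node labeled "ivshmem0"; the calls below are the public API
 * declared in <drivers/virtualization/ivshmem.h>:
 *
 *	const struct device *ivshmem = DEVICE_DT_GET(DT_NODELABEL(ivshmem0));
 *	struct k_poll_signal sig;
 *	uintptr_t mem;
 *	size_t size;
 *	uint32_t peer_id = 1;	/+ ID of the peer to notify (hypothetical) +/
 *
 *	size = ivshmem_get_mem(ivshmem, &mem);	    /+ mapped shared region +/
 *	k_poll_signal_init(&sig);
 *	ivshmem_register_handler(ivshmem, &sig, 0); /+ notify sig on vector 0 +/
 *	ivshmem_int_peer(ivshmem, peer_id, 0);	    /+ ring peer's vector 0 +/
 *
 * ("/+ +/" stands in for nested comments in this sketch.)
 */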
static const struct ivshmem_driver_api ivshmem_api = {
	.get_mem = ivshmem_api_get_mem,
	.get_id = ivshmem_api_get_id,
	.get_vectors = ivshmem_api_get_vectors,
	.int_peer = ivshmem_api_int_peer,
	.register_handler = ivshmem_api_register_handler
};

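/*
 * Locate the PCIe device. A BDF provided through devicetree is used as-is;
 * when it is PCIE_BDF_NONE the device is looked up at runtime by
 * vendor/device ID, which is only supported for a single instance.
 */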
static int ivshmem_init(const struct device *dev)
{
	struct ivshmem *data = dev->data;
	static bool bdf_lookup_done;

	if ((data->bdf == PCIE_BDF_NONE) && bdf_lookup_done) {
		LOG_ERR("One instance of ivshmem with pcie_bdf_lookup() already initialized.\n"
			"Using more than one with PCIE_BDF_NONE parameter might conflict\n"
			"with already initialized instances.");
		return -ENOTSUP;
	}
	if ((data->bdf == PCIE_BDF_NONE) && !bdf_lookup_done) {
		if (data->dev_ven_id) {
			data->bdf = pcie_bdf_lookup(data->dev_ven_id);
		} else {
			data->bdf = pcie_bdf_lookup(PCIE_ID(IVSHMEM_VENDOR_ID, IVSHMEM_DEVICE_ID));
		}
		if (data->bdf == PCIE_BDF_NONE) {
			LOG_WRN("ivshmem device not found");
			return -ENOTSUP;
		}
	}
	LOG_DBG("ivshmem found at bdf 0x%x", data->bdf);
	bdf_lookup_done = true;

	if (!ivshmem_configure(dev)) {
		return -EIO;
	}
	return 0;
}

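/*
 * Per instance, the first devicetree reg cell doubles as PCIe identification:
 * its address is used as the BDF and its size as the vendor/device ID. A BDF
 * of PCIE_BDF_NONE triggers the runtime lookup handled in ivshmem_init().
 */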
#define IVSHMEM_DEVICE_INIT(n) \
	static struct ivshmem ivshmem_data_##n = { \
		.bdf = DT_INST_REG_ADDR_BY_IDX(n, 0), \
		.dev_ven_id = DT_INST_REG_SIZE_BY_IDX(n, 0) \
	}; \
	DEVICE_DT_INST_DEFINE(n, &ivshmem_init, NULL, \
			      &ivshmem_data_##n, NULL, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \
			      &ivshmem_api);

DT_INST_FOREACH_STATUS_OKAY(IVSHMEM_DEVICE_INIT)