/*
 * Copyright (c) 2023 Enphase Energy
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT siemens_ivshmem_eth

#include <zephyr/drivers/virtualization/ivshmem.h>
#include <zephyr/logging/log.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/random/random.h>
#include <ethernet/eth_stats.h>

#include "eth.h"
#include "eth_ivshmem_priv.h"

LOG_MODULE_REGISTER(eth_ivshmem, CONFIG_ETHERNET_LOG_LEVEL);

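/*
 * Link states exchanged with the peer through the ivshmem-v2 state table.
 * Both sides step through RESET -> INIT -> READY -> RUN; either side
 * dropping back to RESET tears the virtual link down again.
 */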
#define ETH_IVSHMEM_STATE_RESET 0
#define ETH_IVSHMEM_STATE_INIT 1
#define ETH_IVSHMEM_STATE_READY 2
#define ETH_IVSHMEM_STATE_RUN 3

static const char * const eth_ivshmem_state_names[] = {
	[ETH_IVSHMEM_STATE_RESET] = "RESET",
	[ETH_IVSHMEM_STATE_INIT] = "INIT",
	[ETH_IVSHMEM_STATE_READY] = "READY",
	[ETH_IVSHMEM_STATE_RUN] = "RUN"
};

struct eth_ivshmem_dev_data {
	struct net_if *iface;

	uint32_t tx_rx_vector;
	uint32_t peer_id;
	uint8_t mac_addr[6];
	struct k_poll_signal poll_signal;
	struct eth_ivshmem_queue ivshmem_queue;

	K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_IVSHMEM_THREAD_STACK_SIZE);
	struct k_thread thread;
	bool enabled;
	uint32_t state;
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	struct net_stats_eth stats;
#endif
};

struct eth_ivshmem_cfg_data {
	const struct device *ivshmem;
	const char *name;
	void (*generate_mac_addr)(uint8_t mac_addr[6]);
};

#if defined(CONFIG_NET_STATISTICS_ETHERNET)
static struct net_stats_eth *eth_ivshmem_get_stats(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif

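/*
 * start()/stop() only record the requested administrative state and wake the
 * worker thread; the actual link transition is made in
 * eth_ivshmem_state_update() once the peer state allows it.
 */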
static int eth_ivshmem_start(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;

	dev_data->enabled = true;

	/* Wake up thread to check/update state */
	k_poll_signal_raise(&dev_data->poll_signal, 0);

	return 0;
}

static int eth_ivshmem_stop(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;

	dev_data->enabled = false;

	/* Wake up thread to check/update state */
	k_poll_signal_raise(&dev_data->poll_signal, 0);

	return 0;
}

static enum ethernet_hw_caps eth_ivshmem_caps(const struct device *dev)
{
	ARG_UNUSED(dev);
	return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T;
}

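/*
 * TX path: reserve a buffer in the shared-memory TX vring, copy the packet
 * payload into it, commit the descriptor, then signal the peer via
 * ivshmem_int_peer().
 */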
static int eth_ivshmem_send(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
	size_t len = net_pkt_get_len(pkt);

	void *data;
	int res = eth_ivshmem_queue_tx_get_buff(&dev_data->ivshmem_queue, &data, len);

	if (res != 0) {
		LOG_ERR("Failed to allocate tx buffer");
		eth_stats_update_errors_tx(dev_data->iface);
		return res;
	}

	if (net_pkt_read(pkt, data, len)) {
		LOG_ERR("Failed to read tx packet");
		eth_stats_update_errors_tx(dev_data->iface);
		return -EIO;
	}

	res = eth_ivshmem_queue_tx_commit_buff(&dev_data->ivshmem_queue);
	if (res == 0) {
		/* Notify peer */
		ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector);
	}

	return res;
}

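/*
 * RX path: peek the next descriptor from the shared-memory RX vring, copy it
 * into a freshly allocated net_pkt, then release the descriptor and notify
 * the peer. Returns NULL when the ring is empty or on error.
 */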
static struct net_pkt *eth_ivshmem_rx(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
	const void *rx_data;
	size_t rx_len;

	int res = eth_ivshmem_queue_rx(&dev_data->ivshmem_queue, &rx_data, &rx_len);

	if (res != 0) {
		if (res != -EWOULDBLOCK) {
			LOG_ERR("Queue RX failed");
			eth_stats_update_errors_rx(dev_data->iface);
		}
		return NULL;
	}

	struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
		dev_data->iface, rx_len, AF_UNSPEC, 0, K_MSEC(100));
	if (pkt == NULL) {
		LOG_ERR("Failed to allocate rx buffer");
		eth_stats_update_errors_rx(dev_data->iface);
		goto dequeue;
	}

	if (net_pkt_write(pkt, rx_data, rx_len) != 0) {
		LOG_ERR("Failed to write rx packet");
		eth_stats_update_errors_rx(dev_data->iface);
		net_pkt_unref(pkt);
		/* Do not hand a freed packet back to the caller */
		pkt = NULL;
	}

dequeue:
	if (eth_ivshmem_queue_rx_complete(&dev_data->ivshmem_queue) == 0) {
		/* Notify peer */
		ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector);
	}

	return pkt;
}

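/* Publish a new local state to the peer and remember it locally. */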
static void eth_ivshmem_set_state(const struct device *dev, uint32_t state)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;

	LOG_DBG("State update: %s -> %s",
		eth_ivshmem_state_names[dev_data->state],
		eth_ivshmem_state_names[state]);
	dev_data->state = state;
	ivshmem_set_state(cfg_data->ivshmem, state);
}

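/*
 * Drive the local state machine from the peer's published state:
 *  - RESET: advance to INIT once the peer is in RESET or INIT.
 *  - INIT:  once the interface exists and the peer has left RESET, reset the
 *           shared queue and advertise READY.
 *  - READY/RUN: follow the administrative enable flag, raising carrier when
 *           entering RUN and falling back to RESET (carrier off) otherwise.
 */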
static void eth_ivshmem_state_update(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;

	uint32_t peer_state = ivshmem_get_state(cfg_data->ivshmem, dev_data->peer_id);

	switch (dev_data->state) {
	case ETH_IVSHMEM_STATE_RESET:
		switch (peer_state) {
		case ETH_IVSHMEM_STATE_RESET:
		case ETH_IVSHMEM_STATE_INIT:
			eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_INIT);
			break;
		default:
			/* Wait for peer to reset */
			break;
		}
		break;
	case ETH_IVSHMEM_STATE_INIT:
		if (dev_data->iface == NULL || peer_state == ETH_IVSHMEM_STATE_RESET) {
			/* Peer is not ready for init */
			break;
		}
		eth_ivshmem_queue_reset(&dev_data->ivshmem_queue);
		eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_READY);
		break;
	case ETH_IVSHMEM_STATE_READY:
	case ETH_IVSHMEM_STATE_RUN:
		switch (peer_state) {
		case ETH_IVSHMEM_STATE_RESET:
			net_eth_carrier_off(dev_data->iface);
			eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET);
			break;
		case ETH_IVSHMEM_STATE_READY:
		case ETH_IVSHMEM_STATE_RUN:
			if (dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_READY) {
				eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RUN);
				net_eth_carrier_on(dev_data->iface);
			} else if (!dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_RUN) {
				net_eth_carrier_off(dev_data->iface);
				eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET);
			}
			break;
		}
		break;
	}
}

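/*
 * Worker thread: woken whenever poll_signal is raised (ivshmem interrupts,
 * start/stop, init), it re-evaluates the state machine and, while in RUN,
 * drains the RX ring into the network stack.
 */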
FUNC_NORETURN static void eth_ivshmem_thread(void *arg1, void *arg2, void *arg3)
{
	const struct device *dev = arg1;
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	struct k_poll_event poll_event;

	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	k_poll_event_init(&poll_event,
			  K_POLL_TYPE_SIGNAL,
			  K_POLL_MODE_NOTIFY_ONLY,
			  &dev_data->poll_signal);

	while (true) {
		k_poll(&poll_event, 1, K_FOREVER);
		poll_event.signal->signaled = 0;
		poll_event.state = K_POLL_STATE_NOT_READY;

		eth_ivshmem_state_update(dev);
		if (dev_data->state != ETH_IVSHMEM_STATE_RUN) {
			continue;
		}

		while (true) {
			struct net_pkt *pkt = eth_ivshmem_rx(dev);

			if (pkt == NULL) {
				break;
			}

			if (net_recv_data(dev_data->iface, pkt) < 0) {
				/* Upper layers are not ready to receive packets */
				net_pkt_unref(pkt);
			}

			k_yield();
		}
	}
}

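/*
 * Device init: validate the ivshmem-v2 backend (protocol, ID, peer count),
 * set up the shared-memory queue on the two output sections, register the
 * interrupt handler(s) and start the worker thread in the RESET state.
 */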
int eth_ivshmem_initialize(const struct device *dev)
{
	struct eth_ivshmem_dev_data *dev_data = dev->data;
	const struct eth_ivshmem_cfg_data *cfg_data = dev->config;
	int res;

	k_poll_signal_init(&dev_data->poll_signal);

	if (!device_is_ready(cfg_data->ivshmem)) {
		LOG_ERR("ivshmem device not ready");
		return -ENODEV;
	}

	uint16_t protocol = ivshmem_get_protocol(cfg_data->ivshmem);

	if (protocol != IVSHMEM_V2_PROTO_NET) {
		LOG_ERR("Invalid ivshmem protocol %hu", protocol);
		return -EINVAL;
	}

	uint32_t id = ivshmem_get_id(cfg_data->ivshmem);
	uint32_t max_peers = ivshmem_get_max_peers(cfg_data->ivshmem);

	LOG_INF("ivshmem: id %u, max_peers %u", id, max_peers);
	if (id > 1) {
		LOG_ERR("Invalid ivshmem ID %u", id);
		return -EINVAL;
	}
	if (max_peers != 2) {
		LOG_ERR("Invalid ivshmem max peers %u", max_peers);
		return -EINVAL;
	}
	dev_data->peer_id = (id == 0) ? 1 : 0;

	uintptr_t output_sections[2];
	size_t output_section_size = ivshmem_get_output_mem_section(
		cfg_data->ivshmem, 0, &output_sections[0]);
	ivshmem_get_output_mem_section(
		cfg_data->ivshmem, 1, &output_sections[1]);

	res = eth_ivshmem_queue_init(
		&dev_data->ivshmem_queue, output_sections[id],
		output_sections[dev_data->peer_id], output_section_size);
	if (res != 0) {
		LOG_ERR("Failed to init ivshmem queue");
		return res;
	}
	LOG_INF("shmem queue: desc len 0x%hX, header size 0x%X, data size 0x%X",
		dev_data->ivshmem_queue.desc_max_len,
		dev_data->ivshmem_queue.vring_header_size,
		dev_data->ivshmem_queue.vring_data_max_len);

	uint16_t n_vectors = ivshmem_get_vectors(cfg_data->ivshmem);

	/* For simplicity, state and TX/RX vectors do the same thing */
	ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 0);
	dev_data->tx_rx_vector = 0;
	if (n_vectors == 0) {
		LOG_ERR("Error no ivshmem ISR vectors");
		return -EINVAL;
	} else if (n_vectors > 1) {
		ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 1);
		dev_data->tx_rx_vector = 1;
	}

	ivshmem_set_state(cfg_data->ivshmem, ETH_IVSHMEM_STATE_RESET);

	cfg_data->generate_mac_addr(dev_data->mac_addr);
	LOG_INF("MAC Address %02X:%02X:%02X:%02X:%02X:%02X",
		dev_data->mac_addr[0], dev_data->mac_addr[1],
		dev_data->mac_addr[2], dev_data->mac_addr[3],
		dev_data->mac_addr[4], dev_data->mac_addr[5]);

	k_tid_t tid = k_thread_create(
		&dev_data->thread, dev_data->thread_stack,
		K_KERNEL_STACK_SIZEOF(dev_data->thread_stack),
		eth_ivshmem_thread,
		(void *) dev, NULL, NULL,
		CONFIG_ETH_IVSHMEM_THREAD_PRIORITY,
		K_ESSENTIAL, K_NO_WAIT);
	k_thread_name_set(tid, cfg_data->name);

	ivshmem_enable_interrupts(cfg_data->ivshmem, true);

	/* Wake up thread to check/update state */
	k_poll_signal_raise(&dev_data->poll_signal, 0);

	return 0;
}

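/* Bind the driver to its net_if, assign the MAC and start with carrier off. */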
static void eth_ivshmem_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_ivshmem_dev_data *dev_data = dev->data;

	if (dev_data->iface == NULL) {
		dev_data->iface = iface;
	}

	net_if_set_link_addr(
		iface, dev_data->mac_addr,
		sizeof(dev_data->mac_addr),
		NET_LINK_ETHERNET);

	ethernet_init(iface);

	/* Do not start the interface until PHY link is up */
	net_if_carrier_off(iface);

	/* Wake up thread to check/update state */
	k_poll_signal_raise(&dev_data->poll_signal, 0);
}

static const struct ethernet_api eth_ivshmem_api = {
	.iface_api.init = eth_ivshmem_iface_init,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = eth_ivshmem_get_stats,
#endif
	.start = eth_ivshmem_start,
	.stop = eth_ivshmem_stop,
	.get_capabilities = eth_ivshmem_caps,
	.send = eth_ivshmem_send,
};

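/*
 * MAC address providers: either generate a random unicast MAC address
 * (zephyr,random-mac-address) or copy the fixed local-mac-address property.
 */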
#define ETH_IVSHMEM_RANDOM_MAC_ADDR(inst) \
	static void generate_mac_addr_##inst(uint8_t mac_addr[6]) \
	{ \
		sys_rand_get(mac_addr, 3U); \
		/* Clear multicast bit */ \
		mac_addr[0] &= 0xFE; \
		gen_random_mac(mac_addr, mac_addr[0], mac_addr[1], mac_addr[2]); \
	}

#define ETH_IVSHMEM_LOCAL_MAC_ADDR(inst) \
	static void generate_mac_addr_##inst(uint8_t mac_addr[6]) \
	{ \
		const uint8_t addr[6] = DT_INST_PROP(inst, local_mac_address); \
		memcpy(mac_addr, addr, sizeof(addr)); \
	}

#define ETH_IVSHMEM_GENERATE_MAC_ADDR(inst) \
	BUILD_ASSERT(DT_INST_PROP(inst, zephyr_random_mac_address) || \
		     NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(inst)), \
		     "eth_ivshmem requires either a fixed or random mac address"); \
	COND_CODE_1(DT_INST_PROP(inst, zephyr_random_mac_address), \
		    (ETH_IVSHMEM_RANDOM_MAC_ADDR(inst)), \
		    (ETH_IVSHMEM_LOCAL_MAC_ADDR(inst)))

#define ETH_IVSHMEM_INIT(inst) \
	ETH_IVSHMEM_GENERATE_MAC_ADDR(inst); \
	static struct eth_ivshmem_dev_data eth_ivshmem_dev_##inst = {}; \
	static const struct eth_ivshmem_cfg_data eth_ivshmem_cfg_##inst = { \
		.ivshmem = DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem_v2)), \
		.name = "ivshmem_eth" STRINGIFY(inst), \
		.generate_mac_addr = generate_mac_addr_##inst, \
	}; \
	ETH_NET_DEVICE_DT_INST_DEFINE(inst, \
				      eth_ivshmem_initialize, \
				      NULL, \
				      &eth_ivshmem_dev_##inst, \
				      &eth_ivshmem_cfg_##inst, \
				      CONFIG_ETH_INIT_PRIORITY, \
				      &eth_ivshmem_api, \
				      NET_ETH_MTU);

DT_INST_FOREACH_STATUS_OKAY(ETH_IVSHMEM_INIT);