1 /*
2 * Copyright (c) 2021 BayLibre SAS
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/logging/log.h>
8 LOG_MODULE_REGISTER(net_eth_bridge, CONFIG_NET_ETHERNET_BRIDGE_LOG_LEVEL);
9
10 #include <zephyr/net/net_core.h>
11 #include <zephyr/net/net_l2.h>
12 #include <zephyr/net/net_if.h>
13 #include <zephyr/net/ethernet.h>
14 #include <zephyr/net/ethernet_bridge.h>
15 #include <zephyr/sys/iterable_sections.h>
16 #include <zephyr/sys/slist.h>
17
18 #include "bridge.h"
19
20 extern struct eth_bridge _eth_bridge_list_start[];
21 extern struct eth_bridge _eth_bridge_list_end[];
22
/* Take the bridge mutex, initializing it on first use. */
static void lock_bridge(struct eth_bridge *br)
{
	/* Lazy-evaluate initialization. The ETH_BRIDGE_INITIALIZER()
	 * macro assumed that k_mutex can be statically initialized,
	 * and it can't. Post-zync, this will actually be possible
	 * and we can come back and fix this.
	 *
	 * NOTE(review): the initialized/k_mutex_init() sequence is not
	 * atomic — two threads racing on the very first lock could both
	 * run k_mutex_init(). Harmless only if the first lock happens
	 * before multithreaded use; TODO confirm, or guard the init.
	 */
	if (!br->initialized) {
		k_mutex_init(&br->lock);
		br->initialized = true;
	}
	k_mutex_lock(&br->lock, K_FOREVER);
}
36
/* Invoke @cb, with @user_data, once for every statically defined
 * bridge instance in the eth_bridge iterable section.
 */
void net_eth_bridge_foreach(eth_bridge_cb_t cb, void *user_data)
{
	for (struct eth_bridge *br = _eth_bridge_list_start;
	     br < _eth_bridge_list_end; br++) {
		cb(br, user_data);
	}
}
43
eth_bridge_get_index(struct eth_bridge * br)44 int eth_bridge_get_index(struct eth_bridge *br)
45 {
46 if (!(br >= _eth_bridge_list_start && br < _eth_bridge_list_end)) {
47 return -1;
48 }
49
50 return (br - _eth_bridge_list_start) + 1;
51 }
52
eth_bridge_get_by_index(int index)53 struct eth_bridge *eth_bridge_get_by_index(int index)
54 {
55 if (index <= 0) {
56 return NULL;
57 }
58
59 if (&_eth_bridge_list_start[index - 1] >= _eth_bridge_list_end) {
60 NET_DBG("Index %d is too large", index);
61 return NULL;
62 }
63
64 return &_eth_bridge_list_start[index - 1];
65 }
66
eth_bridge_iface_add(struct eth_bridge * br,struct net_if * iface)67 int eth_bridge_iface_add(struct eth_bridge *br, struct net_if *iface)
68 {
69 struct ethernet_context *ctx = net_if_l2_data(iface);
70
71 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET) ||
72 !(net_eth_get_hw_capabilities(iface) & ETHERNET_PROMISC_MODE)) {
73 return -EINVAL;
74 }
75
76 lock_bridge(br);
77
78 if (ctx->bridge.instance != NULL) {
79 k_mutex_unlock(&br->lock);
80 return -EBUSY;
81 }
82
83 ctx->bridge.instance = br;
84 ctx->bridge.allow_tx = false;
85 sys_slist_append(&br->interfaces, &ctx->bridge.node);
86
87 k_mutex_unlock(&br->lock);
88
89 int ret = net_eth_promisc_mode(iface, true);
90
91 if (ret != 0) {
92 NET_DBG("iface %p promiscuous mode failed: %d", iface, ret);
93 eth_bridge_iface_remove(br, iface);
94 return ret;
95 }
96
97 NET_DBG("iface %p added to bridge %p", iface, br);
98 return 0;
99 }
100
eth_bridge_iface_remove(struct eth_bridge * br,struct net_if * iface)101 int eth_bridge_iface_remove(struct eth_bridge *br, struct net_if *iface)
102 {
103 struct ethernet_context *ctx = net_if_l2_data(iface);
104
105 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
106 return -EINVAL;
107 }
108
109 lock_bridge(br);
110
111 if (ctx->bridge.instance != br) {
112 k_mutex_unlock(&br->lock);
113 return -EINVAL;
114 }
115
116 sys_slist_find_and_remove(&br->interfaces, &ctx->bridge.node);
117 ctx->bridge.instance = NULL;
118
119 k_mutex_unlock(&br->lock);
120
121 NET_DBG("iface %p removed from bridge %p", iface, br);
122 return 0;
123 }
124
/* Enable or disable transmission of bridged packets through @iface.
 *
 * Returns 0 on success, -EINVAL when @iface is not an Ethernet
 * interface or does not currently belong to any bridge.
 */
int eth_bridge_iface_allow_tx(struct net_if *iface, bool allow)
{
	struct ethernet_context *eth_ctx = net_if_l2_data(iface);

	if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
		return -EINVAL;
	}

	if (eth_ctx->bridge.instance == NULL) {
		return -EINVAL;
	}

	eth_ctx->bridge.allow_tx = allow;

	return 0;
}
137
eth_bridge_listener_add(struct eth_bridge * br,struct eth_bridge_listener * l)138 int eth_bridge_listener_add(struct eth_bridge *br, struct eth_bridge_listener *l)
139 {
140 lock_bridge(br);
141 sys_slist_append(&br->listeners, &l->node);
142 k_mutex_unlock(&br->lock);
143 return 0;
144 }
145
eth_bridge_listener_remove(struct eth_bridge * br,struct eth_bridge_listener * l)146 int eth_bridge_listener_remove(struct eth_bridge *br, struct eth_bridge_listener *l)
147 {
148 lock_bridge(br);
149 sys_slist_find_and_remove(&br->listeners, &l->node);
150 k_mutex_unlock(&br->lock);
151 return 0;
152 }
153
is_link_local_addr(struct net_eth_addr * addr)154 static inline bool is_link_local_addr(struct net_eth_addr *addr)
155 {
156 if (addr->addr[0] == 0x01 &&
157 addr->addr[1] == 0x80 &&
158 addr->addr[2] == 0xc2 &&
159 addr->addr[3] == 0x00 &&
160 addr->addr[4] == 0x00 &&
161 (addr->addr[5] & 0x0f) == 0x00) {
162 return true;
163 }
164
165 return false;
166 }
167
/* Bridge input hook: replicate a received frame to the other members.
 *
 * Called for a packet received on an interface whose ethernet_context
 * belongs to a bridge. ctx->bridge.instance is dereferenced unchecked,
 * so the caller presumably guarantees bridge membership — TODO confirm.
 *
 * Returns NET_DROP for reserved link-local frames (packet ownership
 * stays with the caller), otherwise NET_OK after consuming the original
 * packet: every delivery below uses a shallow clone, and the original
 * reference is released before returning.
 */
enum net_verdict net_eth_bridge_input(struct ethernet_context *ctx,
				      struct net_pkt *pkt)
{
	struct eth_bridge *br = ctx->bridge.instance;
	sys_snode_t *node;

	NET_DBG("new pkt %p", pkt);

	/* Drop all link-local packets for now. */
	if (is_link_local_addr((struct net_eth_addr *)net_pkt_lladdr_dst(pkt))) {
		return NET_DROP;
	}

	/* Hold the bridge lock across both traversals so membership and
	 * listener lists cannot change mid-flood.
	 */
	lock_bridge(br);

	/*
	 * Send packet to all registered interfaces for now.
	 * Eventually we could get smarter with a MAC address cache.
	 */
	SYS_SLIST_FOR_EACH_NODE(&br->interfaces, node) {
		struct ethernet_context *out_ctx;
		struct net_pkt *out_pkt;

		out_ctx = CONTAINER_OF(node, struct ethernet_context, bridge.node);

		/* Don't xmit on the same interface as the incoming packet's */
		if (ctx == out_ctx) {
			continue;
		}

		/* Skip it if not allowed to transmit */
		if (!out_ctx->bridge.allow_tx) {
			continue;
		}

		/* Skip it if not up */
		if (!net_if_flag_is_set(out_ctx->iface, NET_IF_UP)) {
			continue;
		}

		/* Best effort: if cloning fails (e.g. out of net_pkt
		 * buffers with K_NO_WAIT), this interface is silently
		 * skipped rather than failing the whole flood.
		 */
		out_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
		if (out_pkt == NULL) {
			continue;
		}

		NET_DBG("sending pkt %p as %p on iface %p", pkt, out_pkt, out_ctx->iface);

		/*
		 * Use AF_UNSPEC to avoid interference, set the output
		 * interface and send the packet.
		 */
		net_pkt_set_family(out_pkt, AF_UNSPEC);
		net_pkt_set_orig_iface(out_pkt, net_pkt_iface(pkt));
		net_pkt_set_iface(out_pkt, out_ctx->iface);
		net_if_queue_tx(out_ctx->iface, out_pkt);
	}

	/* Hand a clone to each registered listener; the consumer of the
	 * listener's pkt_queue presumably owns and must unref each clone
	 * it takes — TODO confirm against eth_bridge_listener users.
	 */
	SYS_SLIST_FOR_EACH_NODE(&br->listeners, node) {
		struct eth_bridge_listener *l;
		struct net_pkt *out_pkt;

		l = CONTAINER_OF(node, struct eth_bridge_listener, node);

		out_pkt = net_pkt_shallow_clone(pkt, K_NO_WAIT);
		if (out_pkt == NULL) {
			continue;
		}

		k_fifo_put(&l->pkt_queue, out_pkt);
	}

	k_mutex_unlock(&br->lock);

	/* All deliveries used clones; drop the original reference. */
	net_pkt_unref(pkt);
	return NET_OK;
}
244