1 /** @file
2 * @brief LLDP related functions
3 */
4
5 /*
6 * Copyright (c) 2018 Intel Corporation
7 *
8 * SPDX-License-Identifier: Apache-2.0
9 */
10
11 #include <zephyr/logging/log.h>
12 LOG_MODULE_REGISTER(net_lldp, CONFIG_NET_LLDP_LOG_LEVEL);
13
14 #include <errno.h>
15 #include <stdlib.h>
16
17 #include <zephyr/net/net_core.h>
18 #include <zephyr/net/ethernet.h>
19 #include <zephyr/net/net_mgmt.h>
20 #include <zephyr/net/lldp.h>
21
22 static struct net_mgmt_event_callback cb;
23
24 /* Have only one timer in order to save memory */
25 static struct k_work_delayable lldp_tx_timer;
26
27 /* Track currently active timers */
28 static sys_slist_t lldp_ifaces;
29
30 #define BUF_ALLOC_TIMEOUT K_MSEC(50)
31
lldp_find(struct ethernet_context * ctx,struct net_if * iface)32 static int lldp_find(struct ethernet_context *ctx, struct net_if *iface)
33 {
34 int i, found = -1;
35
36 for (i = 0; i < ARRAY_SIZE(ctx->lldp); i++) {
37 if (ctx->lldp[i].iface == iface) {
38 return i;
39 }
40
41 if (found < 0 && ctx->lldp[i].iface == NULL) {
42 found = i;
43 }
44 }
45
46 if (found >= 0) {
47 ctx->lldp[found].iface = iface;
48 return found;
49 }
50
51 return -ENOENT;
52 }
53
/* (Re)arm the shared LLDP TX timer to fire after @p timeout milliseconds. */
static void lldp_submit_work(uint32_t timeout)
{
	/* k_work_reschedule() already replaces any pending expiry, so an
	 * explicit k_work_cancel_delayable() beforehand is redundant.
	 */
	k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout));

	/* %u: k_ticks_to_ms_ceil32() returns uint32_t */
	NET_DBG("Next wakeup in %u ms",
		k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&lldp_tx_timer)));
}
63
/* Return true if the interval that began at @p start (uptime ms) with
 * length @p time ms has expired by the instant @p timeout (uptime ms).
 */
static bool lldp_check_timeout(int64_t start, uint32_t time, int64_t timeout)
{
	int64_t expiry = llabs(start + (int64_t)time);

	return expiry <= timeout;
}
75
lldp_timedout(struct ethernet_lldp * lldp,int64_t timeout)76 static bool lldp_timedout(struct ethernet_lldp *lldp, int64_t timeout)
77 {
78 return lldp_check_timeout(lldp->tx_timer_start,
79 lldp->tx_timer_timeout,
80 timeout);
81 }
82
lldp_send(struct ethernet_lldp * lldp)83 static int lldp_send(struct ethernet_lldp *lldp)
84 {
85 static const struct net_eth_addr lldp_multicast_eth_addr = {
86 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }
87 };
88 int ret = 0;
89 struct net_pkt *pkt;
90 size_t len;
91
92 if (!lldp->lldpdu) {
93 /* The ethernet driver has not set the lldpdu pointer */
94 NET_DBG("The LLDPDU is not set for lldp %p", lldp);
95 ret = -EINVAL;
96 goto out;
97 }
98
99 if (lldp->optional_du && lldp->optional_len) {
100 len = sizeof(struct net_lldpdu) + lldp->optional_len;
101 } else {
102 len = sizeof(struct net_lldpdu);
103 }
104
105 if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
106 len += sizeof(uint16_t);
107 }
108
109 pkt = net_pkt_alloc_with_buffer(lldp->iface, len, AF_UNSPEC, 0,
110 BUF_ALLOC_TIMEOUT);
111 if (!pkt) {
112 ret = -ENOMEM;
113 goto out;
114 }
115
116 net_pkt_set_lldp(pkt, true);
117 net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_LLDP);
118
119 ret = net_pkt_write(pkt, (uint8_t *)lldp->lldpdu,
120 sizeof(struct net_lldpdu));
121 if (ret < 0) {
122 net_pkt_unref(pkt);
123 goto out;
124 }
125
126 if (lldp->optional_du && lldp->optional_len) {
127 ret = net_pkt_write(pkt, (uint8_t *)lldp->optional_du,
128 lldp->optional_len);
129 if (ret < 0) {
130 net_pkt_unref(pkt);
131 goto out;
132 }
133 }
134
135 if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
136 uint16_t tlv_end = htons(NET_LLDP_END_LLDPDU_VALUE);
137
138 ret = net_pkt_write(pkt, (uint8_t *)&tlv_end, sizeof(tlv_end));
139 if (ret < 0) {
140 net_pkt_unref(pkt);
141 goto out;
142 }
143 }
144
145 (void)net_linkaddr_copy(net_pkt_lladdr_src(pkt),
146 net_if_get_link_addr(lldp->iface));
147
148 (void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
149 (uint8_t *)lldp_multicast_eth_addr.addr,
150 sizeof(struct net_eth_addr));
151
152 /* send without timeout, so we do not risk being blocked by tx when
153 * being flooded
154 */
155 if (net_if_try_send_data(lldp->iface, pkt, K_NO_WAIT) == NET_DROP) {
156 net_pkt_unref(pkt);
157 ret = -EIO;
158 }
159
160 out:
161 lldp->tx_timer_start = k_uptime_get();
162
163 return ret;
164 }
165
/* Send the LLDPDU for @p lldp if its interval has elapsed, then return how
 * many milliseconds remain until its next transmission is due.
 *
 * @param lldp    Per-interface LLDP state.
 * @param timeout Current uptime in ms (captured once by the caller).
 *
 * @return Milliseconds until this interface next needs servicing.
 */
static uint32_t lldp_manage_timeouts(struct ethernet_lldp *lldp, int64_t timeout)
{
	int32_t next_timeout;

	if (lldp_timedout(lldp, timeout)) {
		/* lldp_send() also refreshes lldp->tx_timer_start */
		lldp_send(lldp);
	}

	/* NOTE(review): the 64-bit difference is truncated to int32_t, and
	 * abs() flips the (normally negative) "time until due" to a positive
	 * delay.  Assumes uptime deltas fit in 31 bits — verify for very
	 * long uptimes.
	 */
	next_timeout = timeout - (lldp->tx_timer_start +
				  lldp->tx_timer_timeout);

	return abs(next_timeout);
}
179
lldp_tx_timeout(struct k_work * work)180 static void lldp_tx_timeout(struct k_work *work)
181 {
182 uint32_t timeout_update = UINT32_MAX - 1;
183 int64_t timeout = k_uptime_get();
184 struct ethernet_lldp *current, *next;
185
186 ARG_UNUSED(work);
187
188 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&lldp_ifaces, current, next, node) {
189 uint32_t next_timeout;
190
191 next_timeout = lldp_manage_timeouts(current, timeout);
192 if (next_timeout < timeout_update) {
193 timeout_update = next_timeout;
194 }
195 }
196
197 if (timeout_update < (UINT32_MAX - 1)) {
198 NET_DBG("Waiting for %u ms", timeout_update);
199
200 k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout_update));
201 }
202 }
203
lldp_start_timer(struct ethernet_context * ctx,struct net_if * iface,int slot)204 static void lldp_start_timer(struct ethernet_context *ctx,
205 struct net_if *iface,
206 int slot)
207 {
208 /* exit if started */
209 if (ctx->lldp[slot].tx_timer_start != 0) {
210 return;
211 }
212
213 ctx->lldp[slot].iface = iface;
214
215 sys_slist_append(&lldp_ifaces, &ctx->lldp[slot].node);
216
217 ctx->lldp[slot].tx_timer_start = k_uptime_get();
218 ctx->lldp[slot].tx_timer_timeout =
219 CONFIG_NET_LLDP_TX_INTERVAL * MSEC_PER_SEC;
220
221 lldp_submit_work(ctx->lldp[slot].tx_timer_timeout);
222 }
223
lldp_check_iface(struct net_if * iface)224 static int lldp_check_iface(struct net_if *iface)
225 {
226 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
227 return -ENOENT;
228 }
229
230 if (!(net_eth_get_hw_capabilities(iface) & ETHERNET_LLDP)) {
231 return -ESRCH;
232 }
233
234 return 0;
235 }
236
lldp_start(struct net_if * iface,uint32_t mgmt_event)237 static int lldp_start(struct net_if *iface, uint32_t mgmt_event)
238 {
239 struct ethernet_context *ctx;
240 int ret, slot;
241
242 ret = lldp_check_iface(iface);
243 if (ret < 0) {
244 return ret;
245 }
246
247 ctx = net_if_l2_data(iface);
248
249 ret = lldp_find(ctx, iface);
250 if (ret < 0) {
251 return ret;
252 }
253
254 slot = ret;
255
256 if (mgmt_event == NET_EVENT_IF_DOWN) {
257 if (sys_slist_find_and_remove(&lldp_ifaces,
258 &ctx->lldp[slot].node)) {
259 ctx->lldp[slot].tx_timer_start = 0;
260 }
261
262 if (sys_slist_is_empty(&lldp_ifaces)) {
263 k_work_cancel_delayable(&lldp_tx_timer);
264 }
265 } else if (mgmt_event == NET_EVENT_IF_UP) {
266 NET_DBG("Starting timer for iface %p", iface);
267 lldp_start_timer(ctx, iface, slot);
268 }
269
270 return 0;
271 }
272
/* L3 handler for received LLDP frames: hand the packet to the interface's
 * registered callback, or drop it when no callback is set or the frame is
 * not addressed to the LLDP multicast group.
 */
static enum net_verdict net_lldp_recv(struct net_if *iface, uint16_t ptype, struct net_pkt *pkt)
{
	struct ethernet_context *ctx;
	net_lldp_recv_cb_t recv_cb;
	int slot;

	ARG_UNUSED(ptype);

	if (!net_eth_is_addr_lldp_multicast(
		    (struct net_eth_addr *)net_pkt_lladdr_dst(pkt)->addr)) {
		return NET_DROP;
	}

	if (lldp_check_iface(iface) < 0) {
		return NET_DROP;
	}

	ctx = net_if_l2_data(iface);

	slot = lldp_find(ctx, iface);
	if (slot < 0) {
		return NET_DROP;
	}

	recv_cb = ctx->lldp[slot].cb;

	return recv_cb ? recv_cb(iface, pkt) : NET_DROP;
}
305
306 ETH_NET_L3_REGISTER(LLDP, NET_ETH_PTYPE_LLDP, net_lldp_recv);
307
/* Register @p recv_cb to receive LLDP frames arriving on @p iface.
 *
 * @return 0 on success, negative errno if the interface is unsuitable or
 *         the slot table is full.
 */
int net_lldp_register_callback(struct net_if *iface, net_lldp_recv_cb_t recv_cb)
{
	struct ethernet_context *ctx;
	int slot;
	int err = lldp_check_iface(iface);

	if (err < 0) {
		return err;
	}

	ctx = net_if_l2_data(iface);

	slot = lldp_find(ctx, iface);
	if (slot < 0) {
		return slot;
	}

	ctx->lldp[slot].cb = recv_cb;

	return 0;
}
329
/* net_mgmt callback: forward interface up/down events to lldp_start(). */
static void iface_event_handler(struct net_mgmt_event_callback *evt_cb,
				uint32_t mgmt_event, struct net_if *iface)
{
	ARG_UNUSED(evt_cb);

	/* Failures (non-Ethernet iface, full table) are deliberately ignored */
	(void)lldp_start(iface, mgmt_event);
}
335
iface_cb(struct net_if * iface,void * user_data)336 static void iface_cb(struct net_if *iface, void *user_data)
337 {
338 /* If the network interface is already up, then call the sender
339 * immediately. If the interface is not ethernet one, then
340 * lldp_start() will return immediately.
341 */
342 if (net_if_flag_is_set(iface, NET_IF_UP)) {
343 lldp_start(iface, NET_EVENT_IF_UP);
344 }
345 }
346
net_lldp_config(struct net_if * iface,const struct net_lldpdu * lldpdu)347 int net_lldp_config(struct net_if *iface, const struct net_lldpdu *lldpdu)
348 {
349 struct ethernet_context *ctx = net_if_l2_data(iface);
350 int i;
351
352 i = lldp_find(ctx, iface);
353 if (i < 0) {
354 return i;
355 }
356
357 ctx->lldp[i].lldpdu = lldpdu;
358
359 return 0;
360 }
361
/* Attach @p len bytes of optional TLVs at @p tlv (NULL/0 to clear) to be
 * appended after the mandatory LLDPDU on @p iface.
 *
 * @return 0 on success, negative errno if no slot is available.
 */
int net_lldp_config_optional(struct net_if *iface, const uint8_t *tlv, size_t len)
{
	struct ethernet_context *ctx = net_if_l2_data(iface);
	int slot = lldp_find(ctx, iface);

	if (slot < 0) {
		return slot;
	}

	ctx->lldp[slot].optional_du = tlv;
	ctx->lldp[slot].optional_len = len;

	return 0;
}
377
/* Default LLDPDU used by net_lldp_set_lldpdu(): the three mandatory TLVs
 * (Chassis ID, Port ID, TTL) built from Kconfig values.  Each type_length
 * packs the 7-bit TLV type and 9-bit length per IEEE 802.1AB, hence the
 * "<< 9" shift; multi-byte fields are stored in network byte order.
 */
static const struct net_lldpdu lldpdu = {
	.chassis_id = {
		.type_length = htons((LLDP_TLV_CHASSIS_ID << 9) |
			NET_LLDP_CHASSIS_ID_TLV_LEN),
		.subtype = CONFIG_NET_LLDP_CHASSIS_ID_SUBTYPE,
		.value = NET_LLDP_CHASSIS_ID_VALUE
	},
	.port_id = {
		.type_length = htons((LLDP_TLV_PORT_ID << 9) |
			NET_LLDP_PORT_ID_TLV_LEN),
		.subtype = CONFIG_NET_LLDP_PORT_ID_SUBTYPE,
		.value = NET_LLDP_PORT_ID_VALUE
	},
	.ttl = {
		.type_length = htons((LLDP_TLV_TTL << 9) |
			NET_LLDP_TTL_TLV_LEN),
		.ttl = htons(NET_LLDP_TTL)
	},
};
397
/* Attach the built-in default LLDPDU (above) to @p iface.
 *
 * @return 0 on success, negative errno from net_lldp_config() on failure.
 */
int net_lldp_set_lldpdu(struct net_if *iface)
{
	return net_lldp_config(iface, &lldpdu);
}
402
net_lldp_unset_lldpdu(struct net_if * iface)403 void net_lldp_unset_lldpdu(struct net_if *iface)
404 {
405 net_lldp_config(iface, NULL);
406 net_lldp_config_optional(iface, NULL, 0);
407 }
408
/* Initialize the LLDP subsystem: set up the shared TX work item, start
 * LLDP on interfaces that are already up, and subscribe to future
 * interface up/down events.
 */
void net_lldp_init(void)
{
	k_work_init_delayable(&lldp_tx_timer, lldp_tx_timeout);

	/* Handle interfaces that came up before we initialized */
	net_if_foreach(iface_cb, NULL);

	/* React to subsequent up/down transitions */
	net_mgmt_init_event_callback(&cb, iface_event_handler,
				     NET_EVENT_IF_UP | NET_EVENT_IF_DOWN);
	net_mgmt_add_event_callback(&cb);
}
419