1 /** @file
2 * @brief LLDP related functions
3 */
4
5 /*
6 * Copyright (c) 2018 Intel Corporation
7 *
8 * SPDX-License-Identifier: Apache-2.0
9 */
10
11 #include <zephyr/logging/log.h>
12 LOG_MODULE_REGISTER(net_lldp, CONFIG_NET_LLDP_LOG_LEVEL);
13
14 #include <errno.h>
15 #include <stdlib.h>
16
17 #include <zephyr/net/net_core.h>
18 #include <zephyr/net/ethernet.h>
19 #include <zephyr/net/net_mgmt.h>
20 #include <zephyr/net/lldp.h>
21
22 static struct net_mgmt_event_callback cb;
23
24 /* Have only one timer in order to save memory */
25 static struct k_work_delayable lldp_tx_timer;
26
27 /* Track currently active timers */
28 static sys_slist_t lldp_ifaces;
29
30 #define BUF_ALLOC_TIMEOUT K_MSEC(50)
31
lldp_find(struct ethernet_context * ctx,struct net_if * iface)32 static int lldp_find(struct ethernet_context *ctx, struct net_if *iface)
33 {
34 int i, found = -1;
35
36 for (i = 0; i < ARRAY_SIZE(ctx->lldp); i++) {
37 if (ctx->lldp[i].iface == iface) {
38 return i;
39 }
40
41 if (found < 0 && ctx->lldp[i].iface == NULL) {
42 found = i;
43 }
44 }
45
46 if (found >= 0) {
47 ctx->lldp[found].iface = iface;
48 return found;
49 }
50
51 return -ENOENT;
52 }
53
/* (Re)arm the single shared LLDP TX timer to fire in @p timeout ms.
 *
 * k_work_reschedule() already cancels any pending delayed work before
 * scheduling it again, so the previous explicit
 * k_work_cancel_delayable() call was redundant and has been dropped.
 */
static void lldp_submit_work(uint32_t timeout)
{
	k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout));

	/* k_ticks_to_ms_ceil32() returns uint32_t, hence %u */
	NET_DBG("Next wakeup in %u ms",
		k_ticks_to_ms_ceil32(
			k_work_delayable_remaining_get(&lldp_tx_timer)));
}
63
/* Check whether the interval of @p time ms that began at uptime
 * @p start has elapsed by uptime @p timeout ("now").
 *
 * Returns true when the (absolute value of the) deadline
 * start + time is at or before @p timeout.
 */
static bool lldp_check_timeout(int64_t start, uint32_t time, int64_t timeout)
{
	int64_t deadline = llabs(start + (int64_t)time);

	return deadline <= timeout;
}
75
lldp_timedout(struct ethernet_lldp * lldp,int64_t timeout)76 static bool lldp_timedout(struct ethernet_lldp *lldp, int64_t timeout)
77 {
78 return lldp_check_timeout(lldp->tx_timer_start,
79 lldp->tx_timer_timeout,
80 timeout);
81 }
82
/* Build and transmit one LLDP frame on the interface bound to @p lldp.
 *
 * Payload layout: the mandatory LLDPDU set via net_lldp_config(),
 * then any optional TLVs set via net_lldp_config_optional(), then
 * (when CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED) the End Of LLDPDU TLV.
 *
 * Returns 0 on success, negative errno otherwise.  On every exit path
 * tx_timer_start is refreshed, so the next TX interval is measured
 * from this attempt regardless of its outcome.
 */
static int lldp_send(struct ethernet_lldp *lldp)
{
	/* LLDP multicast destination MAC address (01:80:c2:00:00:0e) */
	static const struct net_eth_addr lldp_multicast_eth_addr = {
		{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }
	};
	int ret = 0;
	struct net_pkt *pkt;
	size_t len;

	if (!lldp->lldpdu) {
		/* The ethernet driver has not set the lldpdu pointer */
		NET_DBG("The LLDPDU is not set for lldp %p", lldp);
		ret = -EINVAL;
		goto out;
	}

	/* Total frame length: mandatory LLDPDU plus optional TLVs, if any. */
	if (lldp->optional_du && lldp->optional_len) {
		len = sizeof(struct net_lldpdu) + lldp->optional_len;
	} else {
		len = sizeof(struct net_lldpdu);
	}

	if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
		/* Room for the 16-bit End Of LLDPDU TLV */
		len += sizeof(uint16_t);
	}

	pkt = net_pkt_alloc_with_buffer(lldp->iface, len, AF_UNSPEC, 0,
					BUF_ALLOC_TIMEOUT);
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	net_pkt_set_lldp(pkt, true);

	/* Mandatory part of the LLDPDU */
	ret = net_pkt_write(pkt, (uint8_t *)lldp->lldpdu,
			    sizeof(struct net_lldpdu));
	if (ret < 0) {
		net_pkt_unref(pkt);
		goto out;
	}

	/* Optional, application-provided TLVs */
	if (lldp->optional_du && lldp->optional_len) {
		ret = net_pkt_write(pkt, (uint8_t *)lldp->optional_du,
				    lldp->optional_len);
		if (ret < 0) {
			net_pkt_unref(pkt);
			goto out;
		}
	}

	if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
		uint16_t tlv_end = htons(NET_LLDP_END_LLDPDU_VALUE);

		ret = net_pkt_write(pkt, (uint8_t *)&tlv_end, sizeof(tlv_end));
		if (ret < 0) {
			net_pkt_unref(pkt);
			goto out;
		}
	}

	/* Source is the interface MAC, destination the LLDP multicast. */
	net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(lldp->iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);
	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)lldp_multicast_eth_addr.addr;
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	if (net_if_send_data(lldp->iface, pkt) == NET_DROP) {
		net_pkt_unref(pkt);
		ret = -EIO;
	}

out:
	lldp->tx_timer_start = k_uptime_get();

	return ret;
}
159
lldp_manage_timeouts(struct ethernet_lldp * lldp,int64_t timeout)160 static uint32_t lldp_manage_timeouts(struct ethernet_lldp *lldp, int64_t timeout)
161 {
162 int32_t next_timeout;
163
164 if (lldp_timedout(lldp, timeout)) {
165 lldp_send(lldp);
166 }
167
168 next_timeout = timeout - (lldp->tx_timer_start +
169 lldp->tx_timer_timeout);
170
171 return abs(next_timeout);
172 }
173
lldp_tx_timeout(struct k_work * work)174 static void lldp_tx_timeout(struct k_work *work)
175 {
176 uint32_t timeout_update = UINT32_MAX - 1;
177 int64_t timeout = k_uptime_get();
178 struct ethernet_lldp *current, *next;
179
180 ARG_UNUSED(work);
181
182 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&lldp_ifaces, current, next, node) {
183 uint32_t next_timeout;
184
185 next_timeout = lldp_manage_timeouts(current, timeout);
186 if (next_timeout < timeout_update) {
187 timeout_update = next_timeout;
188 }
189 }
190
191 if (timeout_update < (UINT32_MAX - 1)) {
192 NET_DBG("Waiting for %u ms", timeout_update);
193
194 k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout_update));
195 }
196 }
197
lldp_start_timer(struct ethernet_context * ctx,struct net_if * iface,int slot)198 static void lldp_start_timer(struct ethernet_context *ctx,
199 struct net_if *iface,
200 int slot)
201 {
202 /* exit if started */
203 if (ctx->lldp[slot].tx_timer_start != 0) {
204 return;
205 }
206
207 ctx->lldp[slot].iface = iface;
208
209 sys_slist_append(&lldp_ifaces, &ctx->lldp[slot].node);
210
211 ctx->lldp[slot].tx_timer_start = k_uptime_get();
212 ctx->lldp[slot].tx_timer_timeout =
213 CONFIG_NET_LLDP_TX_INTERVAL * MSEC_PER_SEC;
214
215 lldp_submit_work(ctx->lldp[slot].tx_timer_timeout);
216 }
217
lldp_check_iface(struct net_if * iface)218 static int lldp_check_iface(struct net_if *iface)
219 {
220 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
221 return -ENOENT;
222 }
223
224 if (!(net_eth_get_hw_capabilities(iface) & ETHERNET_LLDP)) {
225 return -ESRCH;
226 }
227
228 return 0;
229 }
230
/* Start or stop LLDP on @p iface according to @p mgmt_event
 * (NET_EVENT_IF_UP / NET_EVENT_IF_DOWN).  Other events are ignored.
 *
 * Returns 0 on success or when the event is not handled; a negative
 * errno when the interface cannot run LLDP or no slot is available.
 */
static int lldp_start(struct net_if *iface, uint32_t mgmt_event)
{
	struct ethernet_context *ctx;
	int slot;

	slot = lldp_check_iface(iface);
	if (slot < 0) {
		return slot;
	}

	ctx = net_if_l2_data(iface);

	slot = lldp_find(ctx, iface);
	if (slot < 0) {
		return slot;
	}

	switch (mgmt_event) {
	case NET_EVENT_IF_DOWN:
		if (sys_slist_find_and_remove(&lldp_ifaces,
					      &ctx->lldp[slot].node)) {
			/* Mark the slot timer as stopped. */
			ctx->lldp[slot].tx_timer_start = 0;
		}

		/* No interface left: the shared timer can be stopped. */
		if (sys_slist_is_empty(&lldp_ifaces)) {
			k_work_cancel_delayable(&lldp_tx_timer);
		}

		break;
	case NET_EVENT_IF_UP:
		NET_DBG("Starting timer for iface %p", iface);
		lldp_start_timer(ctx, iface, slot);
		break;
	default:
		break;
	}

	return 0;
}
266
net_lldp_recv(struct net_if * iface,struct net_pkt * pkt)267 enum net_verdict net_lldp_recv(struct net_if *iface, struct net_pkt *pkt)
268 {
269 struct ethernet_context *ctx;
270 net_lldp_recv_cb_t recv_cb;
271 int ret;
272
273 ret = lldp_check_iface(iface);
274 if (ret < 0) {
275 return NET_DROP;
276 }
277
278 ctx = net_if_l2_data(iface);
279
280 ret = lldp_find(ctx, iface);
281 if (ret < 0) {
282 return NET_DROP;
283 }
284
285 recv_cb = ctx->lldp[ret].cb;
286 if (recv_cb) {
287 return recv_cb(iface, pkt);
288 }
289
290 return NET_DROP;
291 }
292
/* Install @p recv_cb as the handler for LLDP frames received on
 * @p iface.  Returns 0 on success, a negative errno when the
 * interface cannot run LLDP or no slot could be allocated.
 */
int net_lldp_register_callback(struct net_if *iface, net_lldp_recv_cb_t recv_cb)
{
	struct ethernet_context *ctx;
	int slot;

	slot = lldp_check_iface(iface);
	if (slot < 0) {
		return slot;
	}

	ctx = net_if_l2_data(iface);

	slot = lldp_find(ctx, iface);
	if (slot < 0) {
		return slot;
	}

	ctx->lldp[slot].cb = recv_cb;

	return 0;
}
314
/* net_mgmt event hook: start or stop LLDP as interfaces go up or
 * down.  Only NET_EVENT_IF_UP/NET_EVENT_IF_DOWN are subscribed
 * (see net_lldp_init()).
 */
static void iface_event_handler(struct net_mgmt_event_callback *evt_cb,
				uint32_t mgmt_event, struct net_if *iface)
{
	/* Mark the unused parameter, matching lldp_tx_timeout() and
	 * avoiding -Wunused-parameter noise.
	 */
	ARG_UNUSED(evt_cb);

	lldp_start(iface, mgmt_event);
}
320
iface_cb(struct net_if * iface,void * user_data)321 static void iface_cb(struct net_if *iface, void *user_data)
322 {
323 /* If the network interface is already up, then call the sender
324 * immediately. If the interface is not ethernet one, then
325 * lldp_start() will return immediately.
326 */
327 if (net_if_flag_is_set(iface, NET_IF_UP)) {
328 lldp_start(iface, NET_EVENT_IF_UP);
329 }
330 }
331
net_lldp_config(struct net_if * iface,const struct net_lldpdu * lldpdu)332 int net_lldp_config(struct net_if *iface, const struct net_lldpdu *lldpdu)
333 {
334 struct ethernet_context *ctx = net_if_l2_data(iface);
335 int i;
336
337 i = lldp_find(ctx, iface);
338 if (i < 0) {
339 return i;
340 }
341
342 ctx->lldp[i].lldpdu = lldpdu;
343
344 return 0;
345 }
346
/* Attach @p len bytes of optional TLV data at @p tlv to @p iface
 * (NULL/0 detaches).  The buffer is referenced, not copied, so it
 * must stay valid while configured.  Returns 0 on success, a negative
 * errno when no LLDP slot could be found or allocated.
 */
int net_lldp_config_optional(struct net_if *iface, const uint8_t *tlv, size_t len)
{
	struct ethernet_context *ctx = net_if_l2_data(iface);
	int slot = lldp_find(ctx, iface);

	if (slot < 0) {
		return slot;
	}

	ctx->lldp[slot].optional_du = tlv;
	ctx->lldp[slot].optional_len = len;

	return 0;
}
362
/* Default LLDPDU holding the three mandatory TLVs: Chassis ID,
 * Port ID and Time To Live.  Each TLV header packs the type into the
 * upper bits and the length into the lower 9 bits of a 16-bit
 * big-endian field, hence the (type << 9) | length encoding.
 * Subtypes and values come from Kconfig.
 */
static const struct net_lldpdu lldpdu = {
	.chassis_id = {
		.type_length = htons((LLDP_TLV_CHASSIS_ID << 9) |
			NET_LLDP_CHASSIS_ID_TLV_LEN),
		.subtype = CONFIG_NET_LLDP_CHASSIS_ID_SUBTYPE,
		.value = NET_LLDP_CHASSIS_ID_VALUE
	},
	.port_id = {
		.type_length = htons((LLDP_TLV_PORT_ID << 9) |
			NET_LLDP_PORT_ID_TLV_LEN),
		.subtype = CONFIG_NET_LLDP_PORT_ID_SUBTYPE,
		.value = NET_LLDP_PORT_ID_VALUE
	},
	.ttl = {
		.type_length = htons((LLDP_TLV_TTL << 9) |
			NET_LLDP_TTL_TLV_LEN),
		.ttl = htons(NET_LLDP_TTL)
	},
};
382
/* Attach the built-in default LLDPDU (above) to @p iface.
 * Returns 0 on success, a negative errno when no LLDP slot is
 * available for the interface.
 */
int net_lldp_set_lldpdu(struct net_if *iface)
{
	return net_lldp_config(iface, &lldpdu);
}
387
net_lldp_unset_lldpdu(struct net_if * iface)388 void net_lldp_unset_lldpdu(struct net_if *iface)
389 {
390 net_lldp_config(iface, NULL);
391 net_lldp_config_optional(iface, NULL, 0);
392 }
393
/* Bring up the LLDP subsystem: initialize the shared TX work item,
 * start LLDP on interfaces that are already up, then register for
 * interface up/down events so LLDP follows later state changes.
 */
void net_lldp_init(void)
{
	k_work_init_delayable(&lldp_tx_timer, lldp_tx_timeout);

	/* Interfaces that are already up will not emit NET_EVENT_IF_UP,
	 * so start their timers right away.
	 */
	net_if_foreach(iface_cb, NULL);

	net_mgmt_init_event_callback(&cb, iface_event_handler,
				     NET_EVENT_IF_UP | NET_EVENT_IF_DOWN);
	net_mgmt_add_event_callback(&cb);
}
404