1 /** @file
2 * @brief LLDP related functions
3 */
4
5 /*
6 * Copyright (c) 2018 Intel Corporation
7 *
8 * SPDX-License-Identifier: Apache-2.0
9 */
10
11 #include <logging/log.h>
12 LOG_MODULE_REGISTER(net_lldp, CONFIG_NET_LLDP_LOG_LEVEL);
13
14 #include <errno.h>
15 #include <stdlib.h>
16
17 #include <net/net_core.h>
18 #include <net/ethernet.h>
19 #include <net/net_mgmt.h>
20 #include <net/lldp.h>
21
22 static struct net_mgmt_event_callback cb;
23
24 /* Have only one timer in order to save memory */
25 static struct k_work_delayable lldp_tx_timer;
26
27 /* Track currently active timers */
28 static sys_slist_t lldp_ifaces;
29
30 #define BUF_ALLOC_TIMEOUT K_MSEC(50)
31
lldp_find(struct ethernet_context * ctx,struct net_if * iface)32 static int lldp_find(struct ethernet_context *ctx, struct net_if *iface)
33 {
34 int i, found = -1;
35
36 for (i = 0; i < ARRAY_SIZE(ctx->lldp); i++) {
37 if (ctx->lldp[i].iface == iface) {
38 return i;
39 }
40
41 if (found < 0 && ctx->lldp[i].iface == NULL) {
42 found = i;
43 }
44 }
45
46 if (found >= 0) {
47 ctx->lldp[found].iface = iface;
48 return found;
49 }
50
51 return -ENOENT;
52 }
53
lldp_submit_work(uint32_t timeout)54 static void lldp_submit_work(uint32_t timeout)
55 {
56 k_work_cancel_delayable(&lldp_tx_timer);
57 k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout));
58
59 NET_DBG("Next wakeup in %d ms",
60 k_ticks_to_ms_ceil32(
61 k_work_delayable_remaining_get(&lldp_tx_timer)));
62 }
63
lldp_check_timeout(int64_t start,uint32_t time,int64_t timeout)64 static bool lldp_check_timeout(int64_t start, uint32_t time, int64_t timeout)
65 {
66 start += time;
67 start = llabs(start);
68
69 if (start > timeout) {
70 return false;
71 }
72
73 return true;
74 }
75
lldp_timedout(struct ethernet_lldp * lldp,int64_t timeout)76 static bool lldp_timedout(struct ethernet_lldp *lldp, int64_t timeout)
77 {
78 return lldp_check_timeout(lldp->tx_timer_start,
79 lldp->tx_timer_timeout,
80 timeout);
81 }
82
lldp_send(struct ethernet_lldp * lldp)83 static int lldp_send(struct ethernet_lldp *lldp)
84 {
85 static const struct net_eth_addr lldp_multicast_eth_addr = {
86 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }
87 };
88 int ret = 0;
89 struct net_pkt *pkt;
90 size_t len;
91
92 if (!lldp->lldpdu) {
93 /* The ethernet driver has not set the lldpdu pointer */
94 NET_DBG("The LLDPDU is not set for lldp %p", lldp);
95 ret = -EINVAL;
96 goto out;
97 }
98
99 if (lldp->optional_du && lldp->optional_len) {
100 len = sizeof(struct net_lldpdu) + lldp->optional_len;
101 } else {
102 len = sizeof(struct net_lldpdu);
103 }
104
105 if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
106 len += sizeof(uint16_t);
107 }
108
109 pkt = net_pkt_alloc_with_buffer(lldp->iface, len, AF_UNSPEC, 0,
110 BUF_ALLOC_TIMEOUT);
111 if (!pkt) {
112 ret = -ENOMEM;
113 goto out;
114 }
115
116 net_pkt_set_lldp(pkt, true);
117
118 ret = net_pkt_write(pkt, (uint8_t *)lldp->lldpdu,
119 sizeof(struct net_lldpdu));
120 if (ret < 0) {
121 net_pkt_unref(pkt);
122 goto out;
123 }
124
125 if (lldp->optional_du && lldp->optional_len) {
126 ret = net_pkt_write(pkt, (uint8_t *)lldp->optional_du,
127 lldp->optional_len);
128 if (ret < 0) {
129 net_pkt_unref(pkt);
130 goto out;
131 }
132 }
133
134 if (IS_ENABLED(CONFIG_NET_LLDP_END_LLDPDU_TLV_ENABLED)) {
135 uint16_t tlv_end = htons(NET_LLDP_END_LLDPDU_VALUE);
136
137 ret = net_pkt_write(pkt, (uint8_t *)&tlv_end, sizeof(tlv_end));
138 if (ret < 0) {
139 net_pkt_unref(pkt);
140 goto out;
141 }
142 }
143
144 net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(lldp->iface)->addr;
145 net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);
146 net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)lldp_multicast_eth_addr.addr;
147 net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);
148
149 if (net_if_send_data(lldp->iface, pkt) == NET_DROP) {
150 net_pkt_unref(pkt);
151 ret = -EIO;
152 }
153
154 out:
155 lldp->tx_timer_start = k_uptime_get();
156
157 return ret;
158 }
159
lldp_manage_timeouts(struct ethernet_lldp * lldp,int64_t timeout)160 static uint32_t lldp_manage_timeouts(struct ethernet_lldp *lldp, int64_t timeout)
161 {
162 int32_t next_timeout;
163
164 if (lldp_timedout(lldp, timeout)) {
165 lldp_send(lldp);
166 }
167
168 next_timeout = timeout - (lldp->tx_timer_start +
169 lldp->tx_timer_timeout);
170
171 return abs(next_timeout);
172 }
173
lldp_tx_timeout(struct k_work * work)174 static void lldp_tx_timeout(struct k_work *work)
175 {
176 uint32_t timeout_update = UINT32_MAX - 1;
177 int64_t timeout = k_uptime_get();
178 struct ethernet_lldp *current, *next;
179
180 ARG_UNUSED(work);
181
182 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&lldp_ifaces, current, next, node) {
183 uint32_t next_timeout;
184
185 next_timeout = lldp_manage_timeouts(current, timeout);
186 if (next_timeout < timeout_update) {
187 timeout_update = next_timeout;
188 }
189 }
190
191 if (timeout_update < (UINT32_MAX - 1)) {
192 NET_DBG("Waiting for %u ms", timeout_update);
193
194 k_work_reschedule(&lldp_tx_timer, K_MSEC(timeout_update));
195 }
196 }
197
lldp_start_timer(struct ethernet_context * ctx,struct net_if * iface,int slot)198 static void lldp_start_timer(struct ethernet_context *ctx,
199 struct net_if *iface,
200 int slot)
201 {
202 ctx->lldp[slot].iface = iface;
203
204 sys_slist_append(&lldp_ifaces, &ctx->lldp[slot].node);
205
206 ctx->lldp[slot].tx_timer_start = k_uptime_get();
207 ctx->lldp[slot].tx_timer_timeout =
208 CONFIG_NET_LLDP_TX_INTERVAL * MSEC_PER_SEC;
209
210 lldp_submit_work(ctx->lldp[slot].tx_timer_timeout);
211 }
212
lldp_check_iface(struct net_if * iface)213 static int lldp_check_iface(struct net_if *iface)
214 {
215 if (net_if_l2(iface) != &NET_L2_GET_NAME(ETHERNET)) {
216 return -ENOENT;
217 }
218
219 if (!(net_eth_get_hw_capabilities(iface) & ETHERNET_LLDP)) {
220 return -ESRCH;
221 }
222
223 return 0;
224 }
225
lldp_start(struct net_if * iface,uint32_t mgmt_event)226 static int lldp_start(struct net_if *iface, uint32_t mgmt_event)
227 {
228 struct ethernet_context *ctx;
229 int ret, slot;
230
231 ret = lldp_check_iface(iface);
232 if (ret < 0) {
233 return ret;
234 }
235
236 ctx = net_if_l2_data(iface);
237
238 ret = lldp_find(ctx, iface);
239 if (ret < 0) {
240 return ret;
241 }
242
243 slot = ret;
244
245 if (mgmt_event == NET_EVENT_IF_DOWN) {
246 sys_slist_find_and_remove(&lldp_ifaces,
247 &ctx->lldp[slot].node);
248
249 if (sys_slist_is_empty(&lldp_ifaces)) {
250 k_work_cancel_delayable(&lldp_tx_timer);
251 }
252 } else if (mgmt_event == NET_EVENT_IF_UP) {
253 NET_DBG("Starting timer for iface %p", iface);
254 lldp_start_timer(ctx, iface, slot);
255 }
256
257 return 0;
258 }
259
net_lldp_recv(struct net_if * iface,struct net_pkt * pkt)260 enum net_verdict net_lldp_recv(struct net_if *iface, struct net_pkt *pkt)
261 {
262 struct ethernet_context *ctx;
263 net_lldp_recv_cb_t cb;
264 int ret;
265
266 ret = lldp_check_iface(iface);
267 if (ret < 0) {
268 return NET_DROP;
269 }
270
271 ctx = net_if_l2_data(iface);
272
273 ret = lldp_find(ctx, iface);
274 if (ret < 0) {
275 return NET_DROP;
276 }
277
278 cb = ctx->lldp[ret].cb;
279 if (cb) {
280 return cb(iface, pkt);
281 }
282
283 return NET_DROP;
284 }
285
net_lldp_register_callback(struct net_if * iface,net_lldp_recv_cb_t cb)286 int net_lldp_register_callback(struct net_if *iface, net_lldp_recv_cb_t cb)
287 {
288 struct ethernet_context *ctx;
289 int ret;
290
291 ret = lldp_check_iface(iface);
292 if (ret < 0) {
293 return ret;
294 }
295
296 ctx = net_if_l2_data(iface);
297
298 ret = lldp_find(ctx, iface);
299 if (ret < 0) {
300 return ret;
301 }
302
303 ctx->lldp[ret].cb = cb;
304
305 return 0;
306 }
307
iface_event_handler(struct net_mgmt_event_callback * cb,uint32_t mgmt_event,struct net_if * iface)308 static void iface_event_handler(struct net_mgmt_event_callback *cb,
309 uint32_t mgmt_event, struct net_if *iface)
310 {
311 lldp_start(iface, mgmt_event);
312 }
313
iface_cb(struct net_if * iface,void * user_data)314 static void iface_cb(struct net_if *iface, void *user_data)
315 {
316 /* If the network interface is already up, then call the sender
317 * immediately. If the interface is not ethernet one, then
318 * lldp_start() will return immediately.
319 */
320 if (net_if_flag_is_set(iface, NET_IF_UP)) {
321 lldp_start(iface, NET_EVENT_IF_UP);
322 }
323 }
324
net_lldp_config(struct net_if * iface,const struct net_lldpdu * lldpdu)325 int net_lldp_config(struct net_if *iface, const struct net_lldpdu *lldpdu)
326 {
327 struct ethernet_context *ctx = net_if_l2_data(iface);
328 int i;
329
330 i = lldp_find(ctx, iface);
331 if (i < 0) {
332 return i;
333 }
334
335 ctx->lldp[i].lldpdu = lldpdu;
336
337 return 0;
338 }
339
net_lldp_config_optional(struct net_if * iface,const uint8_t * tlv,size_t len)340 int net_lldp_config_optional(struct net_if *iface, const uint8_t *tlv, size_t len)
341 {
342 struct ethernet_context *ctx = net_if_l2_data(iface);
343 int i;
344
345 i = lldp_find(ctx, iface);
346 if (i < 0) {
347 return i;
348 }
349
350 ctx->lldp[i].optional_du = tlv;
351 ctx->lldp[i].optional_len = len;
352
353 return 0;
354 }
355
356 static const struct net_lldpdu lldpdu = {
357 .chassis_id = {
358 .type_length = htons((LLDP_TLV_CHASSIS_ID << 9) |
359 NET_LLDP_CHASSIS_ID_TLV_LEN),
360 .subtype = CONFIG_NET_LLDP_CHASSIS_ID_SUBTYPE,
361 .value = NET_LLDP_CHASSIS_ID_VALUE
362 },
363 .port_id = {
364 .type_length = htons((LLDP_TLV_PORT_ID << 9) |
365 NET_LLDP_PORT_ID_TLV_LEN),
366 .subtype = CONFIG_NET_LLDP_PORT_ID_SUBTYPE,
367 .value = NET_LLDP_PORT_ID_VALUE
368 },
369 .ttl = {
370 .type_length = htons((LLDP_TLV_TTL << 9) |
371 NET_LLDP_TTL_TLV_LEN),
372 .ttl = htons(NET_LLDP_TTL)
373 },
374 };
375
net_lldp_set_lldpdu(struct net_if * iface)376 int net_lldp_set_lldpdu(struct net_if *iface)
377 {
378 return net_lldp_config(iface, &lldpdu);
379 }
380
net_lldp_unset_lldpdu(struct net_if * iface)381 void net_lldp_unset_lldpdu(struct net_if *iface)
382 {
383 net_lldp_config(iface, NULL);
384 net_lldp_config_optional(iface, NULL, 0);
385 }
386
net_lldp_init(void)387 void net_lldp_init(void)
388 {
389 k_work_init_delayable(&lldp_tx_timer, lldp_tx_timeout);
390
391 net_if_foreach(iface_cb, NULL);
392
393 net_mgmt_init_event_callback(&cb, iface_event_handler,
394 NET_EVENT_IF_UP | NET_EVENT_IF_DOWN);
395 net_mgmt_add_event_callback(&cb);
396 }
397