/** @file
 * @brief ARP related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_arp, CONFIG_NET_ARP_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_mgmt.h>

#include "arp.h"
#include "net_private.h"

#define NET_BUF_TIMEOUT K_MSEC(100)
#define ARP_REQUEST_TIMEOUT (2 * MSEC_PER_SEC)

static bool arp_cache_initialized;
static struct arp_entry arp_entries[CONFIG_NET_ARP_TABLE_SIZE];

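/* Every arp_entry lives on exactly one of the three lists below:
 * arp_free_entries (unused), arp_pending_entries (an ARP request is
 * in flight and packets are queued on the entry), or arp_table
 * (resolved, with the most recently used entry kept at the head).
 */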
static sys_slist_t arp_free_entries;
static sys_slist_t arp_pending_entries;
static sys_slist_t arp_table;

static struct k_work_delayable arp_request_timer;

static struct k_mutex arp_mutex;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static struct net_mgmt_event_callback iface_event_cb;
static struct net_mgmt_event_callback ipv4_event_cb;
static struct k_work_delayable arp_gratuitous_work;
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

static void arp_entry_cleanup(struct arp_entry *entry, bool pending)
{
	NET_DBG("entry %p", entry);

	if (pending) {
		struct net_pkt *pkt;

		while (!k_fifo_is_empty(&entry->pending_queue)) {
			pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);
			NET_DBG("Releasing pending pkt %p (ref %ld)",
				pkt,
				atomic_get(&pkt->atomic_ref) - 1);
			net_pkt_unref(pkt);
		}
	}

	entry->iface = NULL;

	(void)memset(&entry->ip, 0, sizeof(struct in_addr));
	(void)memset(&entry->eth, 0, sizeof(struct net_eth_addr));
}

static struct arp_entry *arp_entry_find(sys_slist_t *list,
					struct net_if *iface,
					struct in_addr *dst,
					sys_snode_t **previous)
{
	struct arp_entry *entry;

	SYS_SLIST_FOR_EACH_CONTAINER(list, entry, node) {
		NET_DBG("iface %d (%p) dst %s",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip));

		if (entry->iface == iface &&
		    net_ipv4_addr_cmp(&entry->ip, dst)) {
			return entry;
		}

		if (previous) {
			*previous = &entry->node;
		}
	}

	return NULL;
}

static inline struct arp_entry *arp_entry_find_move_first(struct net_if *iface,
							  struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_table, iface, dst, &prev);
	if (entry) {
		/* Assume the target is going to be accessed more than
		 * once in a short time frame, so move the entry to the
		 * front of the table to speed up subsequent lookups.
		 */
		if (&entry->node != sys_slist_peek_head(&arp_table)) {
			sys_slist_remove(&arp_table, prev, &entry->node);
			sys_slist_prepend(&arp_table, &entry->node);
		}
	}

	return entry;
}

static inline
struct arp_entry *arp_entry_find_pending(struct net_if *iface,
					 struct in_addr *dst)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	return arp_entry_find(&arp_pending_entries, iface, dst, NULL);
}

static struct arp_entry *arp_entry_get_pending(struct net_if *iface,
					       struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_pending_entries, iface, dst, &prev);
	if (entry) {
		/* We remove the entry from the pending list */
		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	return entry;
}

static struct arp_entry *arp_entry_get_free(void)
{
	sys_snode_t *node;

	node = sys_slist_peek_head(&arp_free_entries);
	if (!node) {
		return NULL;
	}

	/* We remove the node from the free list */
	sys_slist_remove(&arp_free_entries, NULL, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static struct arp_entry *arp_entry_get_last_from_table(void)
{
	sys_snode_t *node;

	/* We assume the last entry is the oldest one, so it is the
	 * preferred one to evict.
	 */

	node = sys_slist_peek_tail(&arp_table);
	if (!node) {
		return NULL;
	}

	sys_slist_find_and_remove(&arp_table, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}


static void arp_entry_register_pending(struct arp_entry *entry)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(&entry->ip));

	sys_slist_append(&arp_pending_entries, &entry->node);

	entry->req_start = k_uptime_get_32();

	/* Let's start the timer if necessary */
	if (!k_work_delayable_remaining_get(&arp_request_timer)) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(ARP_REQUEST_TIMEOUT));
	}
}

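/* Expire pending requests older than ARP_REQUEST_TIMEOUT. The age
 * check below uses a signed 32-bit difference so it stays correct
 * across k_uptime_get_32() wrap-around: for example, with req_start
 * 0xFFFFF000 and current 0x00000800 the entry is 6144 ms old, and
 * (int32_t)(req_start + ARP_REQUEST_TIMEOUT - current) is negative
 * as expected.
 */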
static void arp_request_timeout(struct k_work *work)
{
	uint32_t current = k_uptime_get_32();
	struct arp_entry *entry, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if ((int32_t)(entry->req_start +
			      ARP_REQUEST_TIMEOUT - current) > 0) {
			break;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, NULL, &entry->node);
		sys_slist_append(&arp_free_entries, &entry->node);

		entry = NULL;
	}

	if (entry) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(entry->req_start +
					 ARP_REQUEST_TIMEOUT - current));
	}

	k_mutex_unlock(&arp_mutex);
}

static inline struct in_addr *if_get_addr(struct net_if *iface,
					  struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (!ipv4) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED &&
		    (!addr ||
		     net_ipv4_addr_cmp(addr,
				       &ipv4->unicast[i].ipv4.address.in_addr))) {
			return &ipv4->unicast[i].ipv4.address.in_addr;
		}
	}

	return NULL;
}

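/* Build an Ethernet ARP request for next_addr. The resulting 28-byte
 * ARP header is laid out as: hwtype (2), protocol (2), hwlen (1),
 * protolen (1), opcode (2), src_hwaddr (6), src_ipaddr (4),
 * dst_hwaddr (6, all zeroes for a request) and dst_ipaddr (4).
 */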
static inline struct net_pkt *arp_prepare(struct net_if *iface,
					  struct in_addr *next_addr,
					  struct arp_entry *entry,
					  struct net_pkt *pending,
					  struct in_addr *current_ip)
{
	struct net_arp_hdr *hdr;
	struct in_addr *my_addr;
	struct net_pkt *pkt;

	if (current_ip) {
		/* This is the IPv4 autoconf case where things are
		 * already set up, so there is no need to allocate a
		 * new net_pkt.
		 */
		pkt = pending;
	} else {
		pkt = net_pkt_alloc_with_buffer(iface,
						sizeof(struct net_arp_hdr),
						AF_UNSPEC, 0, NET_BUF_TIMEOUT);
		if (!pkt) {
			return NULL;
		}

		/* Avoid recursive loop with network packet capturing */
		if (IS_ENABLED(CONFIG_NET_CAPTURE) && pending) {
			net_pkt_set_captured(pkt, net_pkt_is_captured(pending));
		}

		if (IS_ENABLED(CONFIG_NET_VLAN) && pending) {
			net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(pending));
		}
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);

	/* If entry is not set, then we are just about to send
	 * an ARP request using the data in the pending net_pkt.
	 * This can happen if there is already a pending ARP
	 * request and we want to send it again.
	 */
	if (entry) {
		if (!net_pkt_ipv4_acd(pkt)) {
			net_pkt_ref(pending);
			k_fifo_put(&entry->pending_queue, pending);
		}

		entry->iface = net_pkt_iface(pkt);

		net_ipaddr_copy(&entry->ip, next_addr);

		net_pkt_lladdr_src(pkt)->addr =
			(uint8_t *)net_if_get_link_addr(entry->iface)->addr;

		arp_entry_register_pending(entry);
	} else {
		net_pkt_lladdr_src(pkt)->addr =
			(uint8_t *)net_if_get_link_addr(iface)->addr;
	}

	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)net_eth_broadcast_addr();
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	(void)memset(&hdr->dst_hwaddr.addr, 0x00, sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)next_addr);

	memcpy(hdr->src_hwaddr.addr, net_pkt_lladdr_src(pkt)->addr,
	       sizeof(struct net_eth_addr));

	if (net_pkt_ipv4_acd(pkt)) {
		my_addr = current_ip;
	} else if (!entry) {
		my_addr = (struct in_addr *)NET_IPV4_HDR(pending)->src;
	} else {
		my_addr = if_get_addr(entry->iface, current_ip);
	}

	if (my_addr) {
		net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)my_addr);
	} else {
		(void)memset(&hdr->src_ipaddr, 0, sizeof(struct in_addr));
	}

	NET_DBG("Generating request for %s", net_sprint_ipv4_addr(next_addr));
	return pkt;
}

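/* Resolve the link layer address for request_ip. If the address is
 * already cached, the pending packet is returned with its lladdr
 * fields filled in and can be transmitted as-is; otherwise an ARP
 * request packet is returned (and pkt is queued until the reply
 * arrives), or NULL if the caller should send nothing. Illustrative
 * sketch of a call from an L2 send path (dst_ip is hypothetical):
 *
 *	struct net_pkt *out = net_arp_prepare(pkt, &dst_ip, NULL);
 *
 *	if (out) {
 *		... transmit out instead of pkt ...
 *	}
 */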
struct net_pkt *net_arp_prepare(struct net_pkt *pkt,
				struct in_addr *request_ip,
				struct in_addr *current_ip)
{
	bool is_ipv4_ll_used = false;
	struct arp_entry *entry;
	struct in_addr *addr;

	if (!pkt || !pkt->buffer) {
		return NULL;
	}

	if (net_pkt_ipv4_acd(pkt)) {
		return arp_prepare(net_pkt_iface(pkt), request_ip, NULL,
				   pkt, current_ip);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_AUTO)) {
		is_ipv4_ll_used = net_ipv4_is_ll_addr((struct in_addr *)
						      &NET_IPV4_HDR(pkt)->src) ||
				  net_ipv4_is_ll_addr((struct in_addr *)
						      &NET_IPV4_HDR(pkt)->dst);
	}

	/* If the destination is not in the local network, route it
	 * via the gateway address.
	 */
	if (!current_ip && !is_ipv4_ll_used &&
	    !net_if_ipv4_addr_mask_cmp(net_pkt_iface(pkt), request_ip)) {
		struct net_if_ipv4 *ipv4 = net_pkt_iface(pkt)->config.ip.ipv4;

		if (ipv4) {
			addr = &ipv4->gw;
			if (net_ipv4_is_addr_unspecified(addr)) {
				NET_ERR("Gateway not set for iface %d, could not "
					"send ARP request for %s",
					net_if_get_by_iface(net_pkt_iface(pkt)),
					net_sprint_ipv4_addr(request_ip));

				return NULL;
			}
		} else {
			addr = request_ip;
		}
	} else {
		addr = request_ip;
	}

	k_mutex_lock(&arp_mutex, K_FOREVER);

	/* If the destination address is already known, we do not need
	 * to send any ARP packet.
	 */
	entry = arp_entry_find_move_first(net_pkt_iface(pkt), addr);
	if (!entry) {
		struct net_pkt *req;

		entry = arp_entry_find_pending(net_pkt_iface(pkt), addr);
		if (!entry) {
			/* No pending request, let's try to get a new entry */
			entry = arp_entry_get_free();
			if (!entry) {
				/* Then let's take one from the table? */
				entry = arp_entry_get_last_from_table();
			}
		} else {
			/* There is a pending ARP request already. If this
			 * packet is not yet in the pending list, just append
			 * it to the request FIFO and return, otherwise fall
			 * through and resend the request.
			 */
			if (k_queue_unique_append(&entry->pending_queue._queue,
						  net_pkt_ref(pkt))) {
				NET_DBG("Pending ARP request for %s, queuing pkt %p",
					net_sprint_ipv4_addr(addr), pkt);
				k_mutex_unlock(&arp_mutex);
				return NULL;
			}

			entry = NULL;
		}

		req = arp_prepare(net_pkt_iface(pkt), addr, entry, pkt,
				  current_ip);

		if (!entry) {
			/* We could not get an ARP entry: either the cache is
			 * full (all entries have pending requests) or this
			 * packet was already queued on an existing pending
			 * entry. In both cases the ARP request is sent again
			 * without queuing the packet.
			 */
			NET_DBG("Resending ARP %p", req);
		}

		if (!req && entry) {
			/* Put the entry back on arp_free_entries so that it
			 * is not leaked when the ARP packet allocation fails.
			 */
			sys_slist_prepend(&arp_free_entries, &entry->node);
		}

		k_mutex_unlock(&arp_mutex);
		return req;
	}

	k_mutex_unlock(&arp_mutex);

	net_pkt_lladdr_src(pkt)->addr =
		(uint8_t *)net_if_get_link_addr(entry->iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)&entry->eth;
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	NET_DBG("ARP using ll %s for IP %s",
		net_sprint_ll_addr(net_pkt_lladdr_dst(pkt)->addr,
				   sizeof(struct net_eth_addr)),
		net_sprint_ipv4_addr(&NET_IPV4_HDR(pkt)->dst));

	return pkt;
}

static void arp_gratuitous(struct net_if *iface,
			   struct in_addr *src,
			   struct net_eth_addr *hwaddr)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	entry = arp_entry_find(&arp_table, iface, src, &prev);
	if (entry) {
		NET_DBG("Gratuitous ARP hwaddr %s -> %s",
			net_sprint_ll_addr((const uint8_t *)&entry->eth,
					   sizeof(struct net_eth_addr)),
			net_sprint_ll_addr((const uint8_t *)hwaddr,
					   sizeof(struct net_eth_addr)));

		memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));
	}
}

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
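/* Send a gratuitous ARP request: an announcement in which both the
 * source and the destination IP address are set to our own address
 * and the frame is broadcast, so that peers can refresh their caches.
 */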
static void arp_gratuitous_send(struct net_if *iface,
				struct in_addr *ipaddr)
{
	struct net_arp_hdr *hdr;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));
	net_pkt_set_vlan_tag(pkt, net_eth_get_vlan_tag(iface));

	hdr = NET_ARP_HDR(pkt);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	memcpy(&hdr->dst_hwaddr.addr, net_eth_broadcast_addr(),
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)ipaddr);

	net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)net_eth_broadcast_addr();
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	NET_DBG("Sending gratuitous ARP pkt %p", pkt);

	if (net_if_send_data(iface, pkt) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

static void notify_all_ipv4_addr(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (!ipv4) {
		return;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED) {
			arp_gratuitous_send(iface,
					    &ipv4->unicast[i].ipv4.address.in_addr);
		}
	}
}

static void iface_event_handler(struct net_mgmt_event_callback *cb,
				uint32_t mgmt_event, struct net_if *iface)
{
	ARG_UNUSED(cb);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (mgmt_event != NET_EVENT_IF_UP) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void ipv4_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event, struct net_if *iface)
{
	struct in_addr *ipaddr;

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	if (mgmt_event != NET_EVENT_IPV4_ADDR_ADD) {
		return;
	}

	if (cb->info_length != sizeof(struct in_addr)) {
		return;
	}

	ipaddr = (struct in_addr *)cb->info;

	arp_gratuitous_send(iface, ipaddr);
}

static void iface_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void arp_gratuitous_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	net_if_foreach(iface_cb, NULL);

	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
}
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

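/* Update the ARP cache for the given source IP address. If there is
 * a pending entry for src, it is resolved and its queued packets are
 * transmitted. With gratuitous set, an existing resolved entry is
 * refreshed in place; with force set, an entry is updated or created
 * even if no request was pending.
 */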
void net_arp_update(struct net_if *iface,
		    struct in_addr *src,
		    struct net_eth_addr *hwaddr,
		    bool gratuitous,
		    bool force)
{
	struct arp_entry *entry;
	struct net_pkt *pkt;

	NET_DBG("iface %d (%p) src %s", net_if_get_by_iface(iface), iface,
		net_sprint_ipv4_addr(src));
	net_if_tx_lock(iface);
	k_mutex_lock(&arp_mutex, K_FOREVER);

	entry = arp_entry_get_pending(iface, src);
	if (!entry) {
		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS) && gratuitous) {
			arp_gratuitous(iface, src, hwaddr);
		}

		if (force) {
			sys_snode_t *prev = NULL;
			struct arp_entry *arp_ent;

			arp_ent = arp_entry_find(&arp_table, iface, src, &prev);
			if (arp_ent) {
				memcpy(&arp_ent->eth, hwaddr,
				       sizeof(struct net_eth_addr));
			} else {
				/* Add a new entry as it was not found and
				 * force was set.
				 */
				arp_ent = arp_entry_get_free();
				if (!arp_ent) {
					/* Then let's take one from the table? */
					arp_ent = arp_entry_get_last_from_table();
				}

				if (arp_ent) {
					arp_ent->req_start = k_uptime_get_32();
					arp_ent->iface = iface;
					net_ipaddr_copy(&arp_ent->ip, src);
					memcpy(&arp_ent->eth, hwaddr, sizeof(arp_ent->eth));
					sys_slist_prepend(&arp_table, &arp_ent->node);
				}
			}
		}

		k_mutex_unlock(&arp_mutex);
		net_if_tx_unlock(iface);
		return;
	}

	memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));

	/* Insert the entry into the table */
	sys_slist_prepend(&arp_table, &entry->node);

	while (!k_fifo_is_empty(&entry->pending_queue)) {
		int ret;

		pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);

		/* Set the dst lladdr in the pending packet */
		net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);
		net_pkt_lladdr_dst(pkt)->addr =
			(uint8_t *) &NET_ETH_HDR(pkt)->dst.addr;

		NET_DBG("iface %d (%p) dst %s pending %p frag %p",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip),
			pkt, pkt->frags);

		/* We directly send the packet without queueing it first.
		 * The pkt has already been queued for sending once by
		 * net_if and a second time in the ARP queue. We must not
		 * queue it twice in net_if so that the statistics of
		 * the pkt are not counted twice and the packet filter
		 * callbacks are only called once.
		 */
		ret = net_if_l2(iface)->send(iface, pkt);
		if (ret < 0) {
			net_pkt_unref(pkt);
		}
	}

	k_mutex_unlock(&arp_mutex);
	net_if_tx_unlock(iface);
}

static inline struct net_pkt *arp_prepare_reply(struct net_if *iface,
						struct net_pkt *req,
						struct net_eth_hdr *eth_query,
						struct net_eth_addr *dst_addr)
{
	struct net_arp_hdr *hdr, *query;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);
	query = NET_ARP_HDR(req);

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(req));
	}

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REPLY);

	memcpy(&hdr->dst_hwaddr.addr, &dst_addr->addr,
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, query->src_ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, query->dst_ipaddr);

	net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)&hdr->dst_hwaddr.addr;
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	return pkt;
}

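/* Sanity check an incoming ARP header: only Ethernet hardware and
 * IPv4 protocol addresses of the expected sizes are accepted, and a
 * loopback sender address is rejected.
 */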
static bool arp_hdr_check(struct net_arp_hdr *arp_hdr)
{
	if (ntohs(arp_hdr->hwtype) != NET_ARP_HTYPE_ETH ||
	    ntohs(arp_hdr->protocol) != NET_ETH_PTYPE_IP ||
	    arp_hdr->hwlen != sizeof(struct net_eth_addr) ||
	    arp_hdr->protolen != NET_ARP_IPV4_PTYPE_SIZE ||
	    net_ipv4_is_addr_loopback((struct in_addr *)arp_hdr->src_ipaddr)) {
		NET_DBG("DROP: Invalid ARP header");
		return false;
	}

	return true;
}

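/* Handle a received ARP packet. Requests addressed to one of our
 * IPv4 addresses are answered with a reply; replies for our own
 * requests update the cache and flush the packets queued on the
 * matching pending entry via net_arp_update().
 */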
enum net_verdict net_arp_input(struct net_pkt *pkt,
			       struct net_eth_hdr *eth_hdr)
{
	struct net_eth_addr *dst_hw_addr;
	struct net_arp_hdr *arp_hdr;
	struct net_pkt *reply;
	struct in_addr *addr;

	if (net_pkt_get_len(pkt) < (sizeof(struct net_arp_hdr) -
				    (net_pkt_ip_data(pkt) - (uint8_t *)eth_hdr))) {
		NET_DBG("Invalid ARP header (len %zu, min %zu bytes) %p",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr) -
			(net_pkt_ip_data(pkt) - (uint8_t *)eth_hdr), pkt);
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);
	if (!arp_hdr_check(arp_hdr)) {
		return NET_DROP;
	}

	switch (ntohs(arp_hdr->opcode)) {
	case NET_ARP_REQUEST:
		/* If the ARP request sender hw address is our own
		 * address, we must drop the packet.
		 */
		if (memcmp(&arp_hdr->src_hwaddr,
			   net_if_get_link_addr(net_pkt_iface(pkt))->addr,
			   sizeof(struct net_eth_addr)) == 0) {
			return NET_DROP;
		}

		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS)) {
			if (net_eth_is_addr_broadcast(&eth_hdr->dst) &&
			    (net_eth_is_addr_broadcast(&arp_hdr->dst_hwaddr) ||
			     net_eth_is_addr_all_zeroes(&arp_hdr->dst_hwaddr)) &&
			    net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
						  arp_hdr->src_ipaddr)) {
				/* If the IP address is in our cache,
				 * then update it here.
				 */
				net_arp_update(net_pkt_iface(pkt),
					       (struct in_addr *)arp_hdr->src_ipaddr,
					       &arp_hdr->src_hwaddr,
					       true, false);
				break;
			}
		}

		/* Discard the ARP request if the Ethernet address is
		 * broadcast and the source IP address is a multicast
		 * address.
		 */
		if (memcmp(&eth_hdr->dst, net_eth_broadcast_addr(),
			   sizeof(struct net_eth_addr)) == 0 &&
		    net_ipv4_is_addr_mcast((struct in_addr *)arp_hdr->src_ipaddr)) {
			NET_DBG("DROP: eth addr is bcast, src addr is mcast");
			return NET_DROP;
		}

		/* Someone wants to know our ll address */
		addr = if_get_addr(net_pkt_iface(pkt),
				   (struct in_addr *)arp_hdr->dst_ipaddr);
		if (!addr) {
			/* Not for us so drop the packet silently */
			return NET_DROP;
		}

		NET_DBG("ARP request from %s [%s] for %s",
			net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
			net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
					   arp_hdr->hwlen),
			net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));

		/* Update the ARP cache if the sender MAC address has
		 * changed. In this case the target MAC address is all zeros
		 * and the target IP address is our address.
		 */
		if (net_eth_is_addr_unspecified(&arp_hdr->dst_hwaddr)) {
			NET_DBG("Updating ARP cache for %s [%s] iface %d",
				net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_if_get_by_iface(net_pkt_iface(pkt)));

			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, true);

			dst_hw_addr = &arp_hdr->src_hwaddr;
		} else {
			dst_hw_addr = &eth_hdr->src;
		}

		/* Send reply */
		reply = arp_prepare_reply(net_pkt_iface(pkt), pkt, eth_hdr,
					  dst_hw_addr);
		if (reply) {
			net_if_queue_tx(net_pkt_iface(reply), reply);
		} else {
			NET_DBG("Cannot send ARP reply");
		}
		break;

	case NET_ARP_REPLY:
		if (net_ipv4_is_my_addr((struct in_addr *)arp_hdr->dst_ipaddr)) {
			NET_DBG("Received ll %s for IP %s",
				net_sprint_ll_addr(arp_hdr->src_hwaddr.addr,
						   sizeof(struct net_eth_addr)),
				net_sprint_ipv4_addr(arp_hdr->src_ipaddr));
			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, false);
		}

		break;
	}

	net_pkt_unref(pkt);

	return NET_OK;
}

void net_arp_clear_cache(struct net_if *iface)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry, *next;

	NET_DBG("Flushing ARP table");

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_table, entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, false);

		sys_slist_remove(&arp_table, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	prev = NULL;

	NET_DBG("Flushing ARP pending requests");

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	k_mutex_unlock(&arp_mutex);
}

int net_arp_clear_pending(struct net_if *iface, struct in_addr *dst)
{
	struct arp_entry *entry = arp_entry_find_pending(iface, dst);

	if (!entry) {
		return -ENOENT;
	}

	arp_entry_cleanup(entry, true);

	return 0;
}

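/* Iterate over all resolved ARP table entries, invoking cb for each
 * one while holding the ARP mutex, and return the number of entries
 * visited. Illustrative sketch of a caller (the callback name is
 * hypothetical):
 *
 *	static void dump_entry(struct arp_entry *entry, void *user_data)
 *	{
 *		ARG_UNUSED(user_data);
 *
 *		NET_DBG("%s", net_sprint_ipv4_addr(&entry->ip));
 *	}
 *
 *	int count = net_arp_foreach(dump_entry, NULL);
 */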
int net_arp_foreach(net_arp_cb_t cb, void *user_data)
{
	int ret = 0;
	struct arp_entry *entry;

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER(&arp_table, entry, node) {
		ret++;
		cb(entry, user_data);
	}

	k_mutex_unlock(&arp_mutex);

	return ret;
}

void net_arp_init(void)
{
	int i;

	if (arp_cache_initialized) {
		return;
	}

	sys_slist_init(&arp_free_entries);
	sys_slist_init(&arp_pending_entries);
	sys_slist_init(&arp_table);

	for (i = 0; i < CONFIG_NET_ARP_TABLE_SIZE; i++) {
		/* Inserting entry as free with initialised packet queue */
		k_fifo_init(&arp_entries[i].pending_queue);
		sys_slist_prepend(&arp_free_entries, &arp_entries[i].node);
	}

	k_work_init_delayable(&arp_request_timer, arp_request_timeout);

	k_mutex_init(&arp_mutex);

	arp_cache_initialized = true;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
	net_mgmt_init_event_callback(&iface_event_cb, iface_event_handler,
				     NET_EVENT_IF_UP);
	net_mgmt_init_event_callback(&ipv4_event_cb, ipv4_event_handler,
				     NET_EVENT_IPV4_ADDR_ADD);

	net_mgmt_add_event_callback(&iface_event_cb);
	net_mgmt_add_event_callback(&ipv4_event_cb);

	k_work_init_delayable(&arp_gratuitous_work,
			      arp_gratuitous_work_handler);
	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */
}