/** @file
 * @brief ARP related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_arp, CONFIG_NET_ARP_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_mgmt.h>

#include "arp.h"
#include "ipv4.h"
#include "net_private.h"

#define NET_BUF_TIMEOUT K_MSEC(100)
#define ARP_REQUEST_TIMEOUT (2 * MSEC_PER_SEC)

static bool arp_cache_initialized;
static struct arp_entry arp_entries[CONFIG_NET_ARP_TABLE_SIZE];

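/* arp_free_entries holds unused entries, arp_pending_entries holds
 * entries with an ARP request in flight (each queuing the packets that
 * wait for the reply), and arp_table holds resolved entries, most
 * recently used first.
 */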
static sys_slist_t arp_free_entries;
static sys_slist_t arp_pending_entries;
static sys_slist_t arp_table;

static struct k_work_delayable arp_request_timer;

static struct k_mutex arp_mutex;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static struct net_mgmt_event_callback iface_event_cb;
static struct net_mgmt_event_callback ipv4_event_cb;
static struct k_work_delayable arp_gratuitous_work;
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

static void arp_entry_cleanup(struct arp_entry *entry, bool pending)
{
	NET_DBG("entry %p", entry);

	if (pending) {
		struct net_pkt *pkt;

		while (!k_fifo_is_empty(&entry->pending_queue)) {
			pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);
			NET_DBG("Releasing pending pkt %p (ref %ld)",
				pkt,
				atomic_get(&pkt->atomic_ref) - 1);
			net_pkt_unref(pkt);
		}
	}

	entry->iface = NULL;

	(void)memset(&entry->ip, 0, sizeof(struct in_addr));
	(void)memset(&entry->eth, 0, sizeof(struct net_eth_addr));
}

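/* Search @list for an entry matching @iface and @dst. When @previous is
 * non-NULL, it is left pointing at the node preceding the match so that
 * the caller can remove the entry from the singly linked list.
 */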
static struct arp_entry *arp_entry_find(sys_slist_t *list,
					struct net_if *iface,
					struct in_addr *dst,
					sys_snode_t **previous)
{
	struct arp_entry *entry;

	SYS_SLIST_FOR_EACH_CONTAINER(list, entry, node) {
		NET_DBG("iface %d (%p) dst %s",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip));

		if (entry->iface == iface &&
		    net_ipv4_addr_cmp(&entry->ip, dst)) {
			NET_DBG("found dst %s",
				net_sprint_ipv4_addr(dst));

			return entry;
		}

		if (previous) {
			*previous = &entry->node;
		}
	}

	return NULL;
}

static inline struct arp_entry *arp_entry_find_move_first(struct net_if *iface,
							  struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_table, iface, dst, &prev);
	if (entry) {
		/* Assume the target is going to be accessed more than
		 * once in a short time frame, so move the entry to the
		 * front of the table to speed up subsequent lookups.
		 */
		if (&entry->node != sys_slist_peek_head(&arp_table)) {
			sys_slist_remove(&arp_table, prev, &entry->node);
			sys_slist_prepend(&arp_table, &entry->node);
		}
	}

	return entry;
}

static inline
struct arp_entry *arp_entry_find_pending(struct net_if *iface,
					 struct in_addr *dst)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	return arp_entry_find(&arp_pending_entries, iface, dst, NULL);
}

static struct arp_entry *arp_entry_get_pending(struct net_if *iface,
					       struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_pending_entries, iface, dst, &prev);
	if (entry) {
		/* We remove the entry from the pending list */
		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	return entry;
}

static struct arp_entry *arp_entry_get_free(void)
{
	sys_snode_t *node;

	node = sys_slist_peek_head(&arp_free_entries);
	if (!node) {
		return NULL;
	}

	/* We remove the node from the free list */
	sys_slist_remove(&arp_free_entries, NULL, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static struct arp_entry *arp_entry_get_last_from_table(void)
{
	sys_snode_t *node;

	/* We assume the last entry is the oldest one, so it is the
	 * preferred one to evict.
	 */

	node = sys_slist_peek_tail(&arp_table);
	if (!node) {
		return NULL;
	}

	sys_slist_find_and_remove(&arp_table, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static void arp_entry_register_pending(struct arp_entry *entry)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(&entry->ip));

	sys_slist_append(&arp_pending_entries, &entry->node);

	entry->req_start = k_uptime_get_32();

	/* Let's start the timer if necessary */
	if (!k_work_delayable_remaining_get(&arp_request_timer)) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(ARP_REQUEST_TIMEOUT));
	}
}

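/* Expire pending entries whose request is older than ARP_REQUEST_TIMEOUT.
 * The signed 32-bit subtraction keeps the age comparison correct even
 * when k_uptime_get_32() wraps around.
 */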
static void arp_request_timeout(struct k_work *work)
{
	uint32_t current = k_uptime_get_32();
	struct arp_entry *entry, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if ((int32_t)(entry->req_start +
			      ARP_REQUEST_TIMEOUT - current) > 0) {
			break;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, NULL, &entry->node);
		sys_slist_append(&arp_free_entries, &entry->node);

		entry = NULL;
	}

	if (entry) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(entry->req_start +
					 ARP_REQUEST_TIMEOUT - current));
	}

	k_mutex_unlock(&arp_mutex);
}

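/* Return the first preferred IPv4 unicast address of @iface or, when
 * @addr is non-NULL, that address only if it is configured and
 * preferred on the interface.
 */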
static inline struct in_addr *if_get_addr(struct net_if *iface,
					  struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (!ipv4) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED &&
		    (!addr ||
		     net_ipv4_addr_cmp(addr,
				       &ipv4->unicast[i].ipv4.address.in_addr))) {
			return &ipv4->unicast[i].ipv4.address.in_addr;
		}
	}

	return NULL;
}

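/* Build an ARP request asking for @next_addr. When @current_ip is set
 * (IPv4 autoconf/ACD), the pending packet itself is reused; otherwise a
 * new packet is allocated and, when @entry is given, @pending is queued
 * on it until the reply arrives.
 */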
static inline struct net_pkt *arp_prepare(struct net_if *iface,
					  struct in_addr *next_addr,
					  struct arp_entry *entry,
					  struct net_pkt *pending,
					  struct in_addr *current_ip)
{
	struct net_arp_hdr *hdr;
	struct in_addr *my_addr;
	struct net_pkt *pkt;

	if (current_ip) {
		/* This is the IPv4 autoconf case where everything is
		 * already set up, so there is no need to allocate a
		 * new net_pkt.
		 */
		pkt = pending;
	} else {
		pkt = net_pkt_alloc_with_buffer(iface,
						sizeof(struct net_arp_hdr),
						AF_UNSPEC, 0, NET_BUF_TIMEOUT);
		if (!pkt) {
			return NULL;
		}

		/* Avoid recursive loop with network packet capturing */
		if (IS_ENABLED(CONFIG_NET_CAPTURE) && pending) {
			net_pkt_set_captured(pkt, net_pkt_is_captured(pending));
		}

		if (IS_ENABLED(CONFIG_NET_VLAN) && pending) {
			net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(pending));
		}
	}

	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);
	net_pkt_set_family(pkt, AF_INET);

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);

	/* If entry is not set, then we are just about to send
	 * an ARP request using the data in the pending net_pkt.
	 * This can happen if there is already a pending ARP
	 * request and we want to send it again.
	 */
	if (entry) {
		if (!net_pkt_ipv4_acd(pkt)) {
			net_pkt_ref(pending);
			k_fifo_put(&entry->pending_queue, pending);
		}

		entry->iface = net_pkt_iface(pkt);

		net_ipaddr_copy(&entry->ip, next_addr);

		(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
				       net_if_get_link_addr(entry->iface)->addr,
				       sizeof(struct net_eth_addr));

		arp_entry_register_pending(entry);
	} else {
		(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
				       net_if_get_link_addr(iface)->addr,
				       sizeof(struct net_eth_addr));
	}

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (const uint8_t *)net_eth_broadcast_addr(),
			       sizeof(struct net_eth_addr));

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	(void)memset(&hdr->dst_hwaddr.addr, 0x00, sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)next_addr);

	memcpy(hdr->src_hwaddr.addr, net_pkt_lladdr_src(pkt)->addr,
	       sizeof(struct net_eth_addr));

	if (net_pkt_ipv4_acd(pkt)) {
		my_addr = current_ip;
	} else if (!entry) {
		my_addr = (struct in_addr *)NET_IPV4_HDR(pending)->src;
	} else {
		my_addr = if_get_addr(entry->iface, current_ip);
	}

	if (my_addr) {
		net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)my_addr);
	} else {
		(void)memset(&hdr->src_ipaddr, 0, sizeof(struct in_addr));
	}

	NET_DBG("Generating request for %s", net_sprint_ipv4_addr(next_addr));
	return pkt;
}

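/* Resolve the link-layer destination of @pkt. Returns @pkt itself with
 * the Ethernet addresses filled in when @request_ip (or the gateway) is
 * already cached, a freshly built ARP request to transmit while @pkt
 * waits in the pending queue, or NULL when @pkt was queued behind an
 * existing request or an error occurred.
 */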
struct net_pkt *net_arp_prepare(struct net_pkt *pkt,
				struct in_addr *request_ip,
				struct in_addr *current_ip)
{
	bool is_ipv4_ll_used = false;
	struct arp_entry *entry;
	struct in_addr *addr;

	if (!pkt || !pkt->buffer) {
		return NULL;
	}

	if (net_pkt_ipv4_acd(pkt)) {
		return arp_prepare(net_pkt_iface(pkt), request_ip, NULL,
				   pkt, current_ip);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_AUTO)) {
		is_ipv4_ll_used = net_ipv4_is_ll_addr((struct in_addr *)
						      &NET_IPV4_HDR(pkt)->src) ||
				  net_ipv4_is_ll_addr((struct in_addr *)
						      &NET_IPV4_HDR(pkt)->dst);
	}

	/* If the destination is not in the local network, route the
	 * packet via the gateway address.
	 */
	if (!current_ip && !is_ipv4_ll_used &&
	    !net_if_ipv4_addr_mask_cmp(net_pkt_iface(pkt), request_ip)) {
		struct net_if_ipv4 *ipv4 = net_pkt_iface(pkt)->config.ip.ipv4;

		if (ipv4) {
			addr = &ipv4->gw;
			if (net_ipv4_is_addr_unspecified(addr)) {
				NET_ERR("Gateway not set for iface %d, could not "
					"send ARP request for %s",
					net_if_get_by_iface(net_pkt_iface(pkt)),
					net_sprint_ipv4_addr(request_ip));

				return NULL;
			}
		} else {
			addr = request_ip;
		}
	} else {
		addr = request_ip;
	}

	k_mutex_lock(&arp_mutex, K_FOREVER);

	/* If the destination address is already known, we do not need
	 * to send any ARP packet.
	 */
	entry = arp_entry_find_move_first(net_pkt_iface(pkt), addr);
	if (!entry) {
		struct net_pkt *req;

		entry = arp_entry_find_pending(net_pkt_iface(pkt), addr);
		if (!entry) {
			/* No pending request, so try to get a free entry */
			entry = arp_entry_get_free();
			if (!entry) {
				/* No free entry, evict the oldest one
				 * from the table.
				 */
				entry = arp_entry_get_last_from_table();
			}
		} else {
			/* There is a pending ARP request already. If this
			 * packet is not yet in the pending queue, append it
			 * there and wait; if it already is, resend the ARP
			 * request itself.
			 */
			if (k_queue_unique_append(&entry->pending_queue._queue,
						  net_pkt_ref(pkt))) {
				NET_DBG("Pending ARP request for %s, queuing pkt %p",
					net_sprint_ipv4_addr(addr), pkt);
				k_mutex_unlock(&arp_mutex);
				return NULL;
			}

			entry = NULL;
		}

		req = arp_prepare(net_pkt_iface(pkt), addr, entry, pkt,
				  current_ip);

		if (!entry) {
			/* The packet was already queued behind a pending
			 * request, so only the ARP request itself is sent
			 * again.
			 */
			NET_DBG("Resending ARP %p", req);
		}

		if (!req && entry) {
			/* Put the ARP entry back on arp_free_entries so
			 * that it is not leaked when ARP packet allocation
			 * fails.
			 */
			sys_slist_prepend(&arp_free_entries, &entry->node);
		}

		k_mutex_unlock(&arp_mutex);
		return req;
	}

	k_mutex_unlock(&arp_mutex);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(entry->iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (const uint8_t *)&entry->eth,
			       sizeof(struct net_eth_addr));

	NET_DBG("ARP using ll %s for IP %s",
		net_sprint_ll_addr(net_pkt_lladdr_dst(pkt)->addr,
				   sizeof(struct net_eth_addr)),
		net_sprint_ipv4_addr(NET_IPV4_HDR(pkt)->dst));

	return pkt;
}

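/* Refresh an existing cache entry from a gratuitous ARP. A new entry is
 * never created here; unknown senders are simply ignored.
 */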
static void arp_gratuitous(struct net_if *iface,
			   struct in_addr *src,
			   struct net_eth_addr *hwaddr)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	entry = arp_entry_find(&arp_table, iface, src, &prev);
	if (entry) {
		NET_DBG("Gratuitous ARP hwaddr %s -> %s",
			net_sprint_ll_addr((const uint8_t *)&entry->eth,
					   sizeof(struct net_eth_addr)),
			net_sprint_ll_addr((const uint8_t *)hwaddr,
					   sizeof(struct net_eth_addr)));

		memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));
	}
}

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static void arp_gratuitous_send(struct net_if *iface,
				struct in_addr *ipaddr)
{
	struct net_arp_hdr *hdr;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));
	net_pkt_set_vlan_tag(pkt, net_eth_get_vlan_tag(iface));
	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);

	hdr = NET_ARP_HDR(pkt);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	memcpy(&hdr->dst_hwaddr.addr, net_eth_broadcast_addr(),
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

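	/* A gratuitous ARP (ARP announcement) uses our own address as
	 * both the sender and the target IP address of the request.
	 */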
	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)ipaddr);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (uint8_t *)net_eth_broadcast_addr(),
			       sizeof(struct net_eth_addr));

	NET_DBG("Sending gratuitous ARP pkt %p", pkt);

	/* Send without a timeout so that we do not risk being blocked
	 * by TX when being flooded.
	 */
	if (net_if_try_send_data(iface, pkt, K_NO_WAIT) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

static void notify_all_ipv4_addr(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (!ipv4) {
		return;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED) {
			arp_gratuitous_send(iface,
					    &ipv4->unicast[i].ipv4.address.in_addr);
		}
	}
}

static void iface_event_handler(struct net_mgmt_event_callback *cb,
				uint32_t mgmt_event, struct net_if *iface)
{
	ARG_UNUSED(cb);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (mgmt_event != NET_EVENT_IF_UP) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void ipv4_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event, struct net_if *iface)
{
	struct in_addr *ipaddr;

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	if (mgmt_event != NET_EVENT_IPV4_ADDR_ADD) {
		return;
	}

	if (cb->info_length != sizeof(struct in_addr)) {
		return;
	}

	ipaddr = (struct in_addr *)cb->info;

	arp_gratuitous_send(iface, ipaddr);
}

static void iface_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void arp_gratuitous_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	net_if_foreach(iface_cb, NULL);

	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
}
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

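/* Record that @src is reachable at @hwaddr. With @gratuitous set, an
 * existing table entry is refreshed from a gratuitous ARP; with @force
 * set, an entry is updated or created even without a pending request.
 * Any packets queued while the resolution was pending are sent out.
 */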
void net_arp_update(struct net_if *iface,
		    struct in_addr *src,
		    struct net_eth_addr *hwaddr,
		    bool gratuitous,
		    bool force)
{
	struct arp_entry *entry;
	struct net_pkt *pkt;

	NET_DBG("iface %d (%p) src %s", net_if_get_by_iface(iface), iface,
		net_sprint_ipv4_addr(src));

	net_if_tx_lock(iface);
	k_mutex_lock(&arp_mutex, K_FOREVER);

	entry = arp_entry_get_pending(iface, src);
	if (!entry) {
		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS) && gratuitous) {
			arp_gratuitous(iface, src, hwaddr);
		}

		if (force) {
			sys_snode_t *prev = NULL;
			struct arp_entry *arp_ent;

			arp_ent = arp_entry_find(&arp_table, iface, src, &prev);
			if (arp_ent) {
				memcpy(&arp_ent->eth, hwaddr,
				       sizeof(struct net_eth_addr));
			} else {
				/* Add a new entry as it was not found and
				 * force was set.
				 */
				arp_ent = arp_entry_get_free();
				if (!arp_ent) {
					/* No free entry, evict the oldest
					 * one from the table.
					 */
					arp_ent = arp_entry_get_last_from_table();
				}

				if (arp_ent) {
					arp_ent->req_start = k_uptime_get_32();
					arp_ent->iface = iface;
					net_ipaddr_copy(&arp_ent->ip, src);
					memcpy(&arp_ent->eth, hwaddr, sizeof(arp_ent->eth));
					sys_slist_prepend(&arp_table, &arp_ent->node);
				}
			}
		}

		k_mutex_unlock(&arp_mutex);
		net_if_tx_unlock(iface);
		return;
	}

	memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));

	/* Insert the entry into the table */
	sys_slist_prepend(&arp_table, &entry->node);

	while (!k_fifo_is_empty(&entry->pending_queue)) {
		int ret;

		pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);

		/* Set the dst in the pending packet */
		(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
				       (const uint8_t *)&NET_ETH_HDR(pkt)->dst.addr,
				       sizeof(struct net_eth_addr));

		NET_DBG("iface %d (%p) dst %s pending %p frag %p ptype 0x%04x",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip),
			pkt, pkt->frags, net_pkt_ll_proto_type(pkt));

		/* We directly send the packet without first queueing it.
		 * The pkt has already been queued for sending, once by
		 * net_if and a second time in the ARP queue. We must not
		 * queue it twice in net_if so that the statistics of
		 * the pkt are not counted twice and the packet filter
		 * callbacks are only called once.
		 */
		ret = net_if_l2(iface)->send(iface, pkt);
		if (ret < 0) {
			net_pkt_unref(pkt);
		}
	}

	k_mutex_unlock(&arp_mutex);
	net_if_tx_unlock(iface);
}

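/* Build an ARP reply to @req, swapping the sender and target IP
 * addresses of the query and answering with our own link-layer address.
 */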
static inline struct net_pkt *arp_prepare_reply(struct net_if *iface,
						struct net_pkt *req,
						struct net_eth_addr *dst_addr)
{
	struct net_arp_hdr *hdr, *query;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);
	query = NET_ARP_HDR(req);

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(req));
	}

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REPLY);

	memcpy(&hdr->dst_hwaddr.addr, &dst_addr->addr,
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, query->src_ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, query->dst_ipaddr);

	(void)net_linkaddr_set(net_pkt_lladdr_src(pkt),
			       net_if_get_link_addr(iface)->addr,
			       sizeof(struct net_eth_addr));

	(void)net_linkaddr_set(net_pkt_lladdr_dst(pkt),
			       (uint8_t *)&hdr->dst_hwaddr.addr,
			       sizeof(struct net_eth_addr));

	net_pkt_set_ll_proto_type(pkt, NET_ETH_PTYPE_ARP);
	net_pkt_set_family(pkt, AF_INET);

	return pkt;
}

static bool arp_hdr_check(struct net_arp_hdr *arp_hdr)
{
	if (ntohs(arp_hdr->hwtype) != NET_ARP_HTYPE_ETH ||
	    ntohs(arp_hdr->protocol) != NET_ETH_PTYPE_IP ||
	    arp_hdr->hwlen != sizeof(struct net_eth_addr) ||
	    arp_hdr->protolen != NET_ARP_IPV4_PTYPE_SIZE ||
	    net_ipv4_is_addr_loopback((struct in_addr *)arp_hdr->src_ipaddr)) {
		NET_DBG("DROP: Invalid ARP header");
		return false;
	}

	return true;
}

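/* Handle a received ARP packet: answer requests that target one of our
 * addresses and feed replies (and gratuitous requests) into the cache.
 * Consumes @pkt on success.
 */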
enum net_verdict net_arp_input(struct net_pkt *pkt,
			       struct net_eth_addr *src,
			       struct net_eth_addr *dst)
{
	struct net_eth_addr *dst_hw_addr;
	struct net_arp_hdr *arp_hdr;
	struct net_pkt *reply;
	struct in_addr *addr;

	if (net_pkt_get_len(pkt) < sizeof(struct net_arp_hdr)) {
		NET_DBG("DROP: Too short ARP msg (%zu bytes, min %zu bytes)",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr));
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);
	if (!arp_hdr_check(arp_hdr)) {
		return NET_DROP;
	}

	switch (ntohs(arp_hdr->opcode)) {
	case NET_ARP_REQUEST:
		/* If the ARP request sender hw address is our own
		 * address, we must drop the packet.
		 */
		if (memcmp(&arp_hdr->src_hwaddr,
			   net_if_get_link_addr(net_pkt_iface(pkt))->addr,
			   sizeof(struct net_eth_addr)) == 0) {
			return NET_DROP;
		}

		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS)) {
			if (net_eth_is_addr_broadcast(dst) &&
			    (net_eth_is_addr_broadcast(&arp_hdr->dst_hwaddr) ||
			     net_eth_is_addr_all_zeroes(&arp_hdr->dst_hwaddr)) &&
			    net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
						  arp_hdr->src_ipaddr)) {
				/* If the IP address is in our cache,
				 * then update it here.
				 */
				net_arp_update(net_pkt_iface(pkt),
					       (struct in_addr *)arp_hdr->src_ipaddr,
					       &arp_hdr->src_hwaddr,
					       true, false);
				break;
			}
		}

		/* Discard the ARP request if the Ethernet destination
		 * is broadcast and the source IP address is multicast.
		 */
		if (memcmp(dst, net_eth_broadcast_addr(),
			   sizeof(struct net_eth_addr)) == 0 &&
		    net_ipv4_is_addr_mcast((struct in_addr *)arp_hdr->src_ipaddr)) {
			NET_DBG("DROP: eth addr is bcast, src addr is mcast");
			return NET_DROP;
		}

		/* Someone wants to know our ll address */
		addr = if_get_addr(net_pkt_iface(pkt),
				   (struct in_addr *)arp_hdr->dst_ipaddr);
		if (!addr) {
			/* Not for us so drop the packet silently */
			return NET_DROP;
		}

		NET_DBG("ARP request from %s [%s] for %s",
			net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
			net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
					   arp_hdr->hwlen),
			net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));

		/* Update the ARP cache if the sender MAC address has
		 * changed. In this case the target MAC address is all zeros
		 * and the target IP address is our address.
		 */
		if (net_eth_is_addr_unspecified(&arp_hdr->dst_hwaddr)) {
			NET_DBG("Updating ARP cache for %s [%s] iface %d",
				net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_if_get_by_iface(net_pkt_iface(pkt)));

			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, true);

			dst_hw_addr = &arp_hdr->src_hwaddr;
		} else {
			dst_hw_addr = src;
		}

		/* Send reply */
		reply = arp_prepare_reply(net_pkt_iface(pkt), pkt, dst_hw_addr);
		if (reply) {
			net_if_try_queue_tx(net_pkt_iface(reply), reply, K_NO_WAIT);
		} else {
			NET_DBG("Cannot send ARP reply");
		}
		break;

	case NET_ARP_REPLY:
		if (net_ipv4_is_my_addr((struct in_addr *)arp_hdr->dst_ipaddr)) {
			NET_DBG("Received ll %s for IP %s",
				net_sprint_ll_addr(arp_hdr->src_hwaddr.addr,
						   sizeof(struct net_eth_addr)),
				net_sprint_ipv4_addr(arp_hdr->src_ipaddr));
			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, false);
		}

		break;
	}

	net_pkt_unref(pkt);

	return NET_OK;
}

void net_arp_clear_cache(struct net_if *iface)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry, *next;

	NET_DBG("Flushing ARP table");

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_table, entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, false);

		sys_slist_remove(&arp_table, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	prev = NULL;

	NET_DBG("Flushing ARP pending requests");

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	k_mutex_unlock(&arp_mutex);
}

int net_arp_clear_pending(struct net_if *iface, struct in_addr *dst)
{
	struct arp_entry *entry = arp_entry_find_pending(iface, dst);

	if (!entry) {
		return -ENOENT;
	}

	arp_entry_cleanup(entry, true);

	return 0;
}

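/* Iterate over all resolved cache entries under the ARP mutex and
 * return the number visited. A minimal usage sketch (print_cb is a
 * hypothetical callback, not part of this file):
 *
 *   static void print_cb(struct arp_entry *entry, void *user_data)
 *   {
 *           printk("%s is at %s\n",
 *                  net_sprint_ipv4_addr(&entry->ip),
 *                  net_sprint_ll_addr((uint8_t *)&entry->eth,
 *                                     sizeof(struct net_eth_addr)));
 *   }
 *
 *   int count = net_arp_foreach(print_cb, NULL);
 */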
int net_arp_foreach(net_arp_cb_t cb, void *user_data)
{
	int ret = 0;
	struct arp_entry *entry;

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER(&arp_table, entry, node) {
		ret++;
		cb(entry, user_data);
	}

	k_mutex_unlock(&arp_mutex);

	return ret;
}

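/* Initialize the entry lists, the request timer and the mutex. Safe to
 * call more than once; subsequent calls are no-ops.
 */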
void net_arp_init(void)
{
	int i;

	if (arp_cache_initialized) {
		return;
	}

	sys_slist_init(&arp_free_entries);
	sys_slist_init(&arp_pending_entries);
	sys_slist_init(&arp_table);

	for (i = 0; i < CONFIG_NET_ARP_TABLE_SIZE; i++) {
		/* Insert the entry as free, with its packet queue
		 * initialised.
		 */
		k_fifo_init(&arp_entries[i].pending_queue);
		sys_slist_prepend(&arp_free_entries, &arp_entries[i].node);
	}

	k_work_init_delayable(&arp_request_timer, arp_request_timeout);

	k_mutex_init(&arp_mutex);

	arp_cache_initialized = true;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
	net_mgmt_init_event_callback(&iface_event_cb, iface_event_handler,
				     NET_EVENT_IF_UP);
	net_mgmt_init_event_callback(&ipv4_event_cb, ipv4_event_handler,
				     NET_EVENT_IPV4_ADDR_ADD);

	net_mgmt_add_event_callback(&iface_event_cb);
	net_mgmt_add_event_callback(&ipv4_event_cb);

	k_work_init_delayable(&arp_gratuitous_work,
			      arp_gratuitous_work_handler);
	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */
}

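/* L3 receive handler registered below for NET_ETH_PTYPE_ARP frames.
 * IPv4 address conflict detection gets the first look at the packet
 * before the regular ARP input processing.
 */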
static enum net_verdict arp_recv(struct net_if *iface,
				 uint16_t ptype,
				 struct net_pkt *pkt)
{
	ARG_UNUSED(iface);
	ARG_UNUSED(ptype);

	net_pkt_set_family(pkt, AF_INET);

	NET_DBG("ARP packet from %s received",
		net_sprint_ll_addr(net_pkt_lladdr_src(pkt)->addr,
				   sizeof(struct net_eth_addr)));

	if (IS_ENABLED(CONFIG_NET_IPV4_ACD) &&
	    net_ipv4_acd_input(iface, pkt) == NET_DROP) {
		return NET_DROP;
	}

	return net_arp_input(pkt,
			     (struct net_eth_addr *)net_pkt_lladdr_src(pkt)->addr,
			     (struct net_eth_addr *)net_pkt_lladdr_dst(pkt)->addr);
}

ETH_NET_L3_REGISTER(ARP, NET_ETH_PTYPE_ARP, arp_recv);