/** @file
 * @brief ARP related functions
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_arp, CONFIG_NET_ARP_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_core.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_stats.h>
#include <zephyr/net/net_mgmt.h>

#include "arp.h"
#include "net_private.h"

#define NET_BUF_TIMEOUT K_MSEC(100)
#define ARP_REQUEST_TIMEOUT (2 * MSEC_PER_SEC)
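/* Editor's note: NET_BUF_TIMEOUT bounds how long we wait for net_pkt/net_buf
 * allocation below; ARP_REQUEST_TIMEOUT is how long a pending entry may wait
 * for a reply before arp_request_timeout() expires it.
 */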

static bool arp_cache_initialized;
static struct arp_entry arp_entries[CONFIG_NET_ARP_TABLE_SIZE];

static sys_slist_t arp_free_entries;
static sys_slist_t arp_pending_entries;
static sys_slist_t arp_table;

static struct k_work_delayable arp_request_timer;

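/* Serializes access to the ARP entry lists above. */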
static struct k_mutex arp_mutex;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static struct net_mgmt_event_callback iface_event_cb;
static struct net_mgmt_event_callback ipv4_event_cb;
static struct k_work_delayable arp_gratuitous_work;
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

static void arp_entry_cleanup(struct arp_entry *entry, bool pending)
{
	NET_DBG("entry %p", entry);

	if (pending) {
		struct net_pkt *pkt;

		while (!k_fifo_is_empty(&entry->pending_queue)) {
			pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);
			NET_DBG("Releasing pending pkt %p (ref %ld)",
				pkt,
				atomic_get(&pkt->atomic_ref) - 1);
			net_pkt_unref(pkt);
		}
	}

	entry->iface = NULL;

	(void)memset(&entry->ip, 0, sizeof(struct in_addr));
	(void)memset(&entry->eth, 0, sizeof(struct net_eth_addr));
}

static struct arp_entry *arp_entry_find(sys_slist_t *list,
					struct net_if *iface,
					struct in_addr *dst,
					sys_snode_t **previous)
{
	struct arp_entry *entry;

	SYS_SLIST_FOR_EACH_CONTAINER(list, entry, node) {
		NET_DBG("iface %d (%p) dst %s",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip));

		if (entry->iface == iface &&
		    net_ipv4_addr_cmp(&entry->ip, dst)) {
			return entry;
		}

		if (previous) {
			*previous = &entry->node;
		}
	}

	return NULL;
}

static inline struct arp_entry *arp_entry_find_move_first(struct net_if *iface,
							  struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_table, iface, dst, &prev);
	if (entry) {
		/* Assume the target will be accessed again shortly.
		 * Move the entry to the front of the table in order
		 * to speed up subsequent lookups.
		 */
		if (&entry->node != sys_slist_peek_head(&arp_table)) {
			sys_slist_remove(&arp_table, prev, &entry->node);
			sys_slist_prepend(&arp_table, &entry->node);
		}
	}

	return entry;
}

static inline
struct arp_entry *arp_entry_find_pending(struct net_if *iface,
					 struct in_addr *dst)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	return arp_entry_find(&arp_pending_entries, iface, dst, NULL);
}

static struct arp_entry *arp_entry_get_pending(struct net_if *iface,
					       struct in_addr *dst)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	NET_DBG("dst %s", net_sprint_ipv4_addr(dst));

	entry = arp_entry_find(&arp_pending_entries, iface, dst, &prev);
	if (entry) {
		/* We remove the entry from the pending list */
		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	return entry;
}

static struct arp_entry *arp_entry_get_free(void)
{
	sys_snode_t *node;

	node = sys_slist_peek_head(&arp_free_entries);
	if (!node) {
		return NULL;
	}

	/* We remove the node from the free list */
	sys_slist_remove(&arp_free_entries, NULL, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}

static struct arp_entry *arp_entry_get_last_from_table(void)
{
	sys_snode_t *node;

	/* We assume the last entry is the oldest one, so it is
	 * the preferred one to evict.
	 */

	node = sys_slist_peek_tail(&arp_table);
	if (!node) {
		return NULL;
	}

	sys_slist_find_and_remove(&arp_table, node);

	return CONTAINER_OF(node, struct arp_entry, node);
}


static void arp_entry_register_pending(struct arp_entry *entry)
{
	NET_DBG("dst %s", net_sprint_ipv4_addr(&entry->ip));

	sys_slist_append(&arp_pending_entries, &entry->node);

	entry->req_start = k_uptime_get_32();

	/* Let's start the timer if necessary */
	if (!k_work_delayable_remaining_get(&arp_request_timer)) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(ARP_REQUEST_TIMEOUT));
	}
}

static void arp_request_timeout(struct k_work *work)
{
	uint32_t current = k_uptime_get_32();
	struct arp_entry *entry, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
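		/* Wrap-around safe expiry check: the unsigned
		 * subtraction is evaluated modulo 2^32 before the
		 * signed cast, so the comparison stays correct even
		 * after k_uptime_get_32() overflows.
		 */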
		if ((int32_t)(entry->req_start +
			    ARP_REQUEST_TIMEOUT - current) > 0) {
			break;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, NULL, &entry->node);
		sys_slist_append(&arp_free_entries, &entry->node);

		entry = NULL;
	}

	if (entry) {
		k_work_reschedule(&arp_request_timer,
				  K_MSEC(entry->req_start +
					 ARP_REQUEST_TIMEOUT - current));
	}

	k_mutex_unlock(&arp_mutex);
}

static inline struct in_addr *if_get_addr(struct net_if *iface,
					  struct in_addr *addr)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;

	if (!ipv4) {
		return NULL;
	}

	ARRAY_FOR_EACH(ipv4->unicast, i) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED &&
		    (!addr ||
		     net_ipv4_addr_cmp(addr,
				       &ipv4->unicast[i].ipv4.address.in_addr))) {
			return &ipv4->unicast[i].ipv4.address.in_addr;
		}
	}

	return NULL;
}

static inline struct net_pkt *arp_prepare(struct net_if *iface,
					  struct in_addr *next_addr,
					  struct arp_entry *entry,
					  struct net_pkt *pending,
					  struct in_addr *current_ip)
{
	struct net_arp_hdr *hdr;
	struct in_addr *my_addr;
	struct net_pkt *pkt;

	if (current_ip) {
		/* This is the IPv4 autoconf case where everything is
		 * already set up, so there is no need to allocate a
		 * new net_pkt.
		 */
		pkt = pending;
	} else {
		pkt = net_pkt_alloc_with_buffer(iface,
						sizeof(struct net_arp_hdr),
						AF_UNSPEC, 0, NET_BUF_TIMEOUT);
		if (!pkt) {
			return NULL;
		}

		/* Avoid recursive loop with network packet capturing */
		if (IS_ENABLED(CONFIG_NET_CAPTURE) && pending) {
			net_pkt_set_captured(pkt, net_pkt_is_captured(pending));
		}

		if (IS_ENABLED(CONFIG_NET_VLAN) && pending) {
			net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(pending));
		}
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);

	/* If entry is not set, then we are just about to send
	 * an ARP request using the data in the pending net_pkt.
	 * This can happen if there is already a pending ARP
	 * request and we want to send it again.
	 */
	if (entry) {
		if (!net_pkt_ipv4_acd(pkt)) {
			k_fifo_put(&entry->pending_queue, net_pkt_ref(pending));
		}

		entry->iface = net_pkt_iface(pkt);

		net_ipaddr_copy(&entry->ip, next_addr);

		net_pkt_lladdr_src(pkt)->addr =
			(uint8_t *)net_if_get_link_addr(entry->iface)->addr;

		arp_entry_register_pending(entry);
	} else {
		net_pkt_lladdr_src(pkt)->addr =
			(uint8_t *)net_if_get_link_addr(iface)->addr;
	}

	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)net_eth_broadcast_addr();
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	(void)memset(&hdr->dst_hwaddr.addr, 0x00, sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)next_addr);

	memcpy(hdr->src_hwaddr.addr, net_pkt_lladdr_src(pkt)->addr,
	       sizeof(struct net_eth_addr));

	if (net_pkt_ipv4_acd(pkt)) {
		my_addr = current_ip;
	} else if (!entry) {
		my_addr = (struct in_addr *)NET_IPV4_HDR(pending)->src;
	} else {
		my_addr = if_get_addr(entry->iface, current_ip);
	}

	if (my_addr) {
		net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)my_addr);
	} else {
		(void)memset(&hdr->src_ipaddr, 0, sizeof(struct in_addr));
	}

	NET_DBG("Generating request for %s", net_sprint_ipv4_addr(next_addr));
	return pkt;
}

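/* Resolve the link-layer address for the IPv4 packet in pkt.
 *
 * Returns pkt itself with its lladdr fields filled in when the
 * address is already cached, a freshly built ARP request packet
 * when a lookup is needed (the original packet is then queued
 * behind the request), or NULL when the packet was queued on an
 * already pending request or on failure.
 */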
struct net_pkt *net_arp_prepare(struct net_pkt *pkt,
				struct in_addr *request_ip,
				struct in_addr *current_ip)
{
	bool is_ipv4_ll_used = false;
	struct arp_entry *entry;
	struct in_addr *addr;

	if (!pkt || !pkt->buffer) {
		return NULL;
	}

	if (net_pkt_ipv4_acd(pkt)) {
		return arp_prepare(net_pkt_iface(pkt), request_ip, NULL,
				   pkt, current_ip);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4_AUTO)) {
		is_ipv4_ll_used = net_ipv4_is_ll_addr((struct in_addr *)
						&NET_IPV4_HDR(pkt)->src) ||
				  net_ipv4_is_ll_addr((struct in_addr *)
						&NET_IPV4_HDR(pkt)->dst);
	}

	/* If the destination is not in the local network, route the
	 * packet via the gateway address.
	 */
	if (!current_ip && !is_ipv4_ll_used &&
	    !net_if_ipv4_addr_mask_cmp(net_pkt_iface(pkt), request_ip)) {
		struct net_if_ipv4 *ipv4 = net_pkt_iface(pkt)->config.ip.ipv4;

		if (ipv4) {
			addr = &ipv4->gw;
			if (net_ipv4_is_addr_unspecified(addr)) {
				NET_ERR("Gateway not set for iface %p",
					net_pkt_iface(pkt));

				return NULL;
			}
		} else {
			addr = request_ip;
		}
	} else {
		addr = request_ip;
	}

	k_mutex_lock(&arp_mutex, K_FOREVER);

	/* If the destination address is already known, we do not need
	 * to send any ARP packet.
	 */
	entry = arp_entry_find_move_first(net_pkt_iface(pkt), addr);
	if (!entry) {
		struct net_pkt *req;

		entry = arp_entry_find_pending(net_pkt_iface(pkt), addr);
		if (!entry) {
			/* No pending request, try to get a new entry */
			entry = arp_entry_get_free();
			if (!entry) {
				/* Evict the oldest entry from the table */
				entry = arp_entry_get_last_from_table();
			}
		} else {
			/* An ARP request is already pending for this
			 * address. k_queue_unique_append() only appends
			 * the packet if it is not yet in the pending
			 * queue; in that case we are done. If it was
			 * already queued, fall through and resend the
			 * ARP request itself.
			 */
			if (k_queue_unique_append(&entry->pending_queue._queue,
						  net_pkt_ref(pkt))) {
				NET_DBG("Pending ARP request for %s, queuing pkt %p",
					net_sprint_ipv4_addr(addr), pkt);
				k_mutex_unlock(&arp_mutex);
				return NULL;
			}

			entry = NULL;
		}

		req = arp_prepare(net_pkt_iface(pkt), addr, entry, pkt,
				  current_ip);

		if (!entry) {
			/* The packet could not be queued here: either
			 * every entry is already in use, or the packet
			 * was already waiting in a pending queue. In
			 * both cases only the bare ARP request is
			 * (re)sent.
			 */
			NET_DBG("Resending ARP %p", req);
		}

		if (!req && entry) {
			/* Return the entry to arp_free_entries so that
			 * it is not leaked when the ARP packet
			 * allocation fails.
			 */
			sys_slist_prepend(&arp_free_entries, &entry->node);
		}

		k_mutex_unlock(&arp_mutex);
		return req;
	}

	k_mutex_unlock(&arp_mutex);

	net_pkt_lladdr_src(pkt)->addr =
		(uint8_t *)net_if_get_link_addr(entry->iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)&entry->eth;
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	NET_DBG("ARP using ll %s for IP %s",
		net_sprint_ll_addr(net_pkt_lladdr_dst(pkt)->addr,
				   sizeof(struct net_eth_addr)),
		net_sprint_ipv4_addr(&NET_IPV4_HDR(pkt)->dst));

	return pkt;
}

static void arp_gratuitous(struct net_if *iface,
			   struct in_addr *src,
			   struct net_eth_addr *hwaddr)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry;

	entry = arp_entry_find(&arp_table, iface, src, &prev);
	if (entry) {
		NET_DBG("Gratuitous ARP hwaddr %s -> %s",
			net_sprint_ll_addr((const uint8_t *)&entry->eth,
					   sizeof(struct net_eth_addr)),
			net_sprint_ll_addr((const uint8_t *)hwaddr,
					   sizeof(struct net_eth_addr)));

		memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));
	}
}

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
static void arp_gratuitous_send(struct net_if *iface,
				struct in_addr *ipaddr)
{
	struct net_arp_hdr *hdr;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));
	net_pkt_set_vlan_tag(pkt, net_eth_get_vlan_tag(iface));

	hdr = NET_ARP_HDR(pkt);

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REQUEST);

	memcpy(&hdr->dst_hwaddr.addr, net_eth_broadcast_addr(),
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));
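
	/* In a gratuitous ARP the sender and target IP addresses are
	 * both our own address, so receivers refresh any cached
	 * mapping they have for it.
	 */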
	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, (uint8_t *)ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, (uint8_t *)ipaddr);

	net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)net_eth_broadcast_addr();
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	NET_DBG("Sending gratuitous ARP pkt %p", pkt);

	if (net_if_send_data(iface, pkt) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

static void notify_all_ipv4_addr(struct net_if *iface)
{
	struct net_if_ipv4 *ipv4 = iface->config.ip.ipv4;
	int i;

	if (!ipv4) {
		return;
	}

	for (i = 0; i < NET_IF_MAX_IPV4_ADDR; i++) {
		if (ipv4->unicast[i].ipv4.is_used &&
		    ipv4->unicast[i].ipv4.address.family == AF_INET &&
		    ipv4->unicast[i].ipv4.addr_state == NET_ADDR_PREFERRED) {
			arp_gratuitous_send(iface,
					    &ipv4->unicast[i].ipv4.address.in_addr);
		}
	}
}

static void iface_event_handler(struct net_mgmt_event_callback *cb,
				uint32_t mgmt_event, struct net_if *iface)
{
	ARG_UNUSED(cb);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (mgmt_event != NET_EVENT_IF_UP) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void ipv4_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event, struct net_if *iface)
{
	struct in_addr *ipaddr;

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	if (mgmt_event != NET_EVENT_IPV4_ADDR_ADD) {
		return;
	}

	if (cb->info_length != sizeof(struct in_addr)) {
		return;
	}

	ipaddr = (struct in_addr *)cb->info;

	arp_gratuitous_send(iface, ipaddr);
}

static void iface_cb(struct net_if *iface, void *user_data)
{
	ARG_UNUSED(user_data);

	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	if (!net_if_is_up(iface)) {
		return;
	}

	notify_all_ipv4_addr(iface);
}

static void arp_gratuitous_work_handler(struct k_work *work)
{
	ARG_UNUSED(work);

	net_if_foreach(iface_cb, NULL);

	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
}
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */

void net_arp_update(struct net_if *iface,
		    struct in_addr *src,
		    struct net_eth_addr *hwaddr,
		    bool gratuitous,
		    bool force)
{
	struct arp_entry *entry;
	struct net_pkt *pkt;

	NET_DBG("iface %d (%p) src %s", net_if_get_by_iface(iface), iface,
		net_sprint_ipv4_addr(src));
	net_if_tx_lock(iface);
	k_mutex_lock(&arp_mutex, K_FOREVER);

	entry = arp_entry_get_pending(iface, src);
	if (!entry) {
		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS) && gratuitous) {
			arp_gratuitous(iface, src, hwaddr);
		}

		if (force) {
			sys_snode_t *prev = NULL;
			struct arp_entry *arp_ent;

			arp_ent = arp_entry_find(&arp_table, iface, src, &prev);
			if (arp_ent) {
				memcpy(&arp_ent->eth, hwaddr,
				       sizeof(struct net_eth_addr));
			} else {
				/* Add new entry as it was not found and force
				 * was set.
				 */
				arp_ent = arp_entry_get_free();
				if (!arp_ent) {
					/* Evict the oldest entry from the table */
					arp_ent = arp_entry_get_last_from_table();
				}

				if (arp_ent) {
					arp_ent->req_start = k_uptime_get_32();
					arp_ent->iface = iface;
					net_ipaddr_copy(&arp_ent->ip, src);
					memcpy(&arp_ent->eth, hwaddr, sizeof(arp_ent->eth));
					sys_slist_prepend(&arp_table, &arp_ent->node);
				}
			}
		}

		k_mutex_unlock(&arp_mutex);
		net_if_tx_unlock(iface);
		return;
	}

	memcpy(&entry->eth, hwaddr, sizeof(struct net_eth_addr));

	/* Insert the entry into the table */
	sys_slist_prepend(&arp_table, &entry->node);

	while (!k_fifo_is_empty(&entry->pending_queue)) {
		int ret;

		pkt = k_fifo_get(&entry->pending_queue, K_FOREVER);

		/* Set the dst in the pending packet */
		net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);
		net_pkt_lladdr_dst(pkt)->addr =
			(uint8_t *) &NET_ETH_HDR(pkt)->dst.addr;

		NET_DBG("iface %d (%p) dst %s pending %p frag %p",
			net_if_get_by_iface(iface), iface,
			net_sprint_ipv4_addr(&entry->ip),
			pkt, pkt->frags);

		/* We send the packet directly without queueing it
		 * again. The pkt has already been queued for sending
		 * once by net_if and a second time in the ARP queue.
		 * We must not queue it again in net_if, so that the
		 * pkt statistics are not counted twice and the packet
		 * filter callbacks are only called once.
		 */
		ret = net_if_l2(iface)->send(iface, pkt);
		if (ret < 0) {
			net_pkt_unref(pkt);
		}
	}

	k_mutex_unlock(&arp_mutex);
	net_if_tx_unlock(iface);
}

static inline struct net_pkt *arp_prepare_reply(struct net_if *iface,
						struct net_pkt *req,
						struct net_eth_hdr *eth_query,
						struct net_eth_addr *dst_addr)
{
	struct net_arp_hdr *hdr, *query;
	struct net_pkt *pkt;

	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, NET_BUF_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	net_buf_add(pkt->buffer, sizeof(struct net_arp_hdr));

	hdr = NET_ARP_HDR(pkt);
	query = NET_ARP_HDR(req);

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		net_pkt_set_vlan_tag(pkt, net_pkt_vlan_tag(req));
	}

	hdr->hwtype = htons(NET_ARP_HTYPE_ETH);
	hdr->protocol = htons(NET_ETH_PTYPE_IP);
	hdr->hwlen = sizeof(struct net_eth_addr);
	hdr->protolen = sizeof(struct in_addr);
	hdr->opcode = htons(NET_ARP_REPLY);

	memcpy(&hdr->dst_hwaddr.addr, &dst_addr->addr,
	       sizeof(struct net_eth_addr));
	memcpy(&hdr->src_hwaddr.addr, net_if_get_link_addr(iface)->addr,
	       sizeof(struct net_eth_addr));

	net_ipv4_addr_copy_raw(hdr->dst_ipaddr, query->src_ipaddr);
	net_ipv4_addr_copy_raw(hdr->src_ipaddr, query->dst_ipaddr);

	net_pkt_lladdr_src(pkt)->addr = net_if_get_link_addr(iface)->addr;
	net_pkt_lladdr_src(pkt)->len = sizeof(struct net_eth_addr);

	net_pkt_lladdr_dst(pkt)->addr = (uint8_t *)&hdr->dst_hwaddr.addr;
	net_pkt_lladdr_dst(pkt)->len = sizeof(struct net_eth_addr);

	return pkt;
}

static bool arp_hdr_check(struct net_arp_hdr *arp_hdr)
{
	if (ntohs(arp_hdr->hwtype) != NET_ARP_HTYPE_ETH ||
	    ntohs(arp_hdr->protocol) != NET_ETH_PTYPE_IP ||
	    arp_hdr->hwlen != sizeof(struct net_eth_addr) ||
	    arp_hdr->protolen != NET_ARP_IPV4_PTYPE_SIZE ||
	    net_ipv4_is_addr_loopback((struct in_addr *)arp_hdr->src_ipaddr)) {
		NET_DBG("DROP: Invalid ARP header");
		return false;
	}

	return true;
}

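/* Process a received ARP packet. Returns NET_OK when the packet was
 * consumed (and unreferenced) here, or NET_DROP when the caller
 * should discard it.
 */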
enum net_verdict net_arp_input(struct net_pkt *pkt,
			       struct net_eth_hdr *eth_hdr)
{
	struct net_eth_addr *dst_hw_addr;
	struct net_arp_hdr *arp_hdr;
	struct net_pkt *reply;
	struct in_addr *addr;

	if (net_pkt_get_len(pkt) < (sizeof(struct net_arp_hdr) -
				    (net_pkt_ip_data(pkt) - (uint8_t *)eth_hdr))) {
		NET_DBG("Invalid ARP header (len %zu, min %zu bytes) %p",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr) -
			(net_pkt_ip_data(pkt) - (uint8_t *)eth_hdr), pkt);
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);
	if (!arp_hdr_check(arp_hdr)) {
		return NET_DROP;
	}

	switch (ntohs(arp_hdr->opcode)) {
	case NET_ARP_REQUEST:
		/* If the ARP request's sender hardware address is our
		 * own address, we must drop the packet.
		 */
		if (memcmp(&arp_hdr->src_hwaddr,
			   net_if_get_link_addr(net_pkt_iface(pkt))->addr,
			   sizeof(struct net_eth_addr)) == 0) {
			return NET_DROP;
		}

		if (IS_ENABLED(CONFIG_NET_ARP_GRATUITOUS)) {
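			/* A gratuitous ARP is a broadcast request in
			 * which the sender and target IP addresses
			 * are equal and the target hardware address
			 * is all zeroes or broadcast.
			 */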
			if (net_eth_is_addr_broadcast(&eth_hdr->dst) &&
			    (net_eth_is_addr_broadcast(&arp_hdr->dst_hwaddr) ||
			     net_eth_is_addr_all_zeroes(&arp_hdr->dst_hwaddr)) &&
			    net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
						  arp_hdr->src_ipaddr)) {
				/* If the IP address is in our cache,
				 * then update it here.
				 */
				net_arp_update(net_pkt_iface(pkt),
					       (struct in_addr *)arp_hdr->src_ipaddr,
					       &arp_hdr->src_hwaddr,
					       true, false);
				break;
			}
		}

		/* Discard the ARP request if the Ethernet address is
		 * broadcast and the source IP address is a multicast
		 * address.
		 */
		if (memcmp(&eth_hdr->dst, net_eth_broadcast_addr(),
			   sizeof(struct net_eth_addr)) == 0 &&
		    net_ipv4_is_addr_mcast((struct in_addr *)arp_hdr->src_ipaddr)) {
			NET_DBG("DROP: eth addr is bcast, src addr is mcast");
			return NET_DROP;
		}

		/* Someone wants to know our ll address */
		addr = if_get_addr(net_pkt_iface(pkt),
				   (struct in_addr *)arp_hdr->dst_ipaddr);
		if (!addr) {
			/* Not for us so drop the packet silently */
			return NET_DROP;
		}

		NET_DBG("ARP request from %s [%s] for %s",
			net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
			net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
					   arp_hdr->hwlen),
			net_sprint_ipv4_addr(&arp_hdr->dst_ipaddr));

		/* Update the ARP cache if the sender MAC address has
		 * changed. In this case the target MAC address is all zeros
		 * and the target IP address is our address.
		 */
		if (net_eth_is_addr_unspecified(&arp_hdr->dst_hwaddr)) {
			NET_DBG("Updating ARP cache for %s [%s] iface %d",
				net_sprint_ipv4_addr(&arp_hdr->src_ipaddr),
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_if_get_by_iface(net_pkt_iface(pkt)));

			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, true);

			dst_hw_addr = &arp_hdr->src_hwaddr;
		} else {
			dst_hw_addr = &eth_hdr->src;
		}

		/* Send reply */
		reply = arp_prepare_reply(net_pkt_iface(pkt), pkt, eth_hdr,
					  dst_hw_addr);
		if (reply) {
			net_if_queue_tx(net_pkt_iface(reply), reply);
		} else {
			NET_DBG("Cannot send ARP reply");
		}
		break;

	case NET_ARP_REPLY:
		if (net_ipv4_is_my_addr((struct in_addr *)arp_hdr->dst_ipaddr)) {
			NET_DBG("Received ll %s for IP %s",
				net_sprint_ll_addr(arp_hdr->src_hwaddr.addr,
						   sizeof(struct net_eth_addr)),
				net_sprint_ipv4_addr(arp_hdr->src_ipaddr));
			net_arp_update(net_pkt_iface(pkt),
				       (struct in_addr *)arp_hdr->src_ipaddr,
				       &arp_hdr->src_hwaddr,
				       false, false);
		}

		break;
	}

	net_pkt_unref(pkt);

	return NET_OK;
}

void net_arp_clear_cache(struct net_if *iface)
{
	sys_snode_t *prev = NULL;
	struct arp_entry *entry, *next;

	NET_DBG("Flushing ARP table");

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_table, entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, false);

		sys_slist_remove(&arp_table, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	prev = NULL;

	NET_DBG("Flushing ARP pending requests");

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&arp_pending_entries,
					  entry, next, node) {
		if (iface && iface != entry->iface) {
			prev = &entry->node;
			continue;
		}

		arp_entry_cleanup(entry, true);

		sys_slist_remove(&arp_pending_entries, prev, &entry->node);
		sys_slist_prepend(&arp_free_entries, &entry->node);
	}

	if (sys_slist_is_empty(&arp_pending_entries)) {
		k_work_cancel_delayable(&arp_request_timer);
	}

	k_mutex_unlock(&arp_mutex);
}

int net_arp_clear_pending(struct net_if *iface, struct in_addr *dst)
{
	struct arp_entry *entry = arp_entry_find_pending(iface, dst);

	if (!entry) {
		return -ENOENT;
	}

	arp_entry_cleanup(entry, true);

	return 0;
}

int net_arp_foreach(net_arp_cb_t cb, void *user_data)
{
	int ret = 0;
	struct arp_entry *entry;

	k_mutex_lock(&arp_mutex, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER(&arp_table, entry, node) {
		ret++;
		cb(entry, user_data);
	}

	k_mutex_unlock(&arp_mutex);

	return ret;
}
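
/* Illustrative use of net_arp_foreach(): dump every cached entry.
 * The callback signature matches the cb(entry, user_data)
 * invocation above.
 *
 *   static void arp_dump_cb(struct arp_entry *entry, void *user_data)
 *   {
 *       ARG_UNUSED(user_data);
 *       NET_DBG("%s", net_sprint_ipv4_addr(&entry->ip));
 *   }
 *
 *   (void)net_arp_foreach(arp_dump_cb, NULL);
 */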

void net_arp_init(void)
{
	int i;

	if (arp_cache_initialized) {
		return;
	}

	sys_slist_init(&arp_free_entries);
	sys_slist_init(&arp_pending_entries);
	sys_slist_init(&arp_table);

	for (i = 0; i < CONFIG_NET_ARP_TABLE_SIZE; i++) {
		/* Insert the entry into the free list with an
		 * initialised packet queue.
		 */
		k_fifo_init(&arp_entries[i].pending_queue);
		sys_slist_prepend(&arp_free_entries, &arp_entries[i].node);
	}

	k_work_init_delayable(&arp_request_timer, arp_request_timeout);

	k_mutex_init(&arp_mutex);

	arp_cache_initialized = true;

#if defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION)
	net_mgmt_init_event_callback(&iface_event_cb, iface_event_handler,
				     NET_EVENT_IF_UP);
	net_mgmt_init_event_callback(&ipv4_event_cb, ipv4_event_handler,
				     NET_EVENT_IPV4_ADDR_ADD);

	net_mgmt_add_event_callback(&iface_event_cb);
	net_mgmt_add_event_callback(&ipv4_event_cb);

	k_work_init_delayable(&arp_gratuitous_work,
			      arp_gratuitous_work_handler);
	k_work_reschedule(&arp_gratuitous_work,
			  K_SECONDS(CONFIG_NET_ARP_GRATUITOUS_INTERVAL));
#endif /* defined(CONFIG_NET_ARP_GRATUITOUS_TRANSMISSION) */
}