1 /*
2  * Copyright (c) 2017 Matthias Boesl
3  * Copyright (c) 2018 Intel Corporation
4  * Copyright (c) 2024 Nordic Semiconductor ASA
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  */
8 
9 /** @file
10  * @brief IPv4 address conflict detection
11  */
12 
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(net_ipv4_acd, CONFIG_NET_IPV4_ACD_LOG_LEVEL);
15 
16 #include <zephyr/net/ethernet.h>
17 #include <zephyr/net/net_if.h>
18 #include <zephyr/net/net_l2.h>
19 #include <zephyr/net/net_mgmt.h>
20 #include <zephyr/net/net_pkt.h>
21 #include <zephyr/random/random.h>
22 #include <zephyr/sys/slist.h>
23 
24 #include "ipv4.h"
25 #include "net_private.h"
26 #include "../l2/ethernet/arp.h"
27 
28 static K_MUTEX_DEFINE(lock);
29 
30 /* Address conflict detection timer. */
31 static struct k_work_delayable ipv4_acd_timer;
32 
33 /* List of IPv4 addresses under an active conflict detection. */
34 static sys_slist_t active_acd_timers;
35 
36 #define BUF_ALLOC_TIMEOUT K_MSEC(100)
37 
38 /* Initial random delay*/
39 #define IPV4_ACD_PROBE_WAIT 1
40 
41 /* Number of probe packets */
42 #define IPV4_ACD_PROBE_NUM 3
43 
44 /* Minimum delay till repeated probe */
45 #define IPV4_ACD_PROBE_MIN 1
46 
47 /* Maximum delay till repeated probe */
48 #define IPV4_ACD_PROBE_MAX 2
49 
50 /* Delay before announcing */
51 #define IPV4_ACD_ANNOUNCE_WAIT 2
52 
53 /* Number of announcement packets */
54 #define IPV4_ACD_ANNOUNCE_NUM 2
55 
56 /* Time between announcement packets */
57 #define IPV4_ACD_ANNOUNCE_INTERVAL 2
58 
59 /* Max conflicts before rate limiting */
60 #define IPV4_ACD_MAX_CONFLICTS 10
61 
62 /* Delay between successive attempts */
63 #define IPV4_ACD_RATE_LIMIT_INTERVAL 60
64 
65 /* Minimum interval between defensive ARPs */
66 #define IPV4_ACD_DEFEND_INTERVAL 10
67 
68 enum ipv4_acd_state {
69 	IPV4_ACD_PROBE,    /* Probing state */
70 	IPV4_ACD_ANNOUNCE, /* Announce state */
71 };
72 
ipv4_acd_prepare_arp(struct net_if * iface,struct in_addr * sender_ip,struct in_addr * target_ip)73 static struct net_pkt *ipv4_acd_prepare_arp(struct net_if *iface,
74 					    struct in_addr *sender_ip,
75 					    struct in_addr *target_ip)
76 {
77 	struct net_pkt *pkt;
78 
79 	/* We provide AF_UNSPEC to the allocator: this packet does not
80 	 * need space for any IPv4 header.
81 	 */
82 	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
83 					AF_UNSPEC, 0, BUF_ALLOC_TIMEOUT);
84 	if (!pkt) {
85 		return NULL;
86 	}
87 
88 	net_pkt_set_family(pkt, AF_INET);
89 	net_pkt_set_ipv4_acd(pkt, true);
90 
91 	return net_arp_prepare(pkt, target_ip, sender_ip);
92 }
93 
ipv4_acd_send_probe(struct net_if_addr * ifaddr)94 static void ipv4_acd_send_probe(struct net_if_addr *ifaddr)
95 {
96 	struct net_if *iface = net_if_get_by_index(ifaddr->ifindex);
97 	struct in_addr unspecified = { 0 };
98 	struct net_pkt *pkt;
99 
100 	pkt = ipv4_acd_prepare_arp(iface, &unspecified, &ifaddr->address.in_addr);
101 	if (!pkt) {
102 		NET_DBG("Failed to prepare probe %p", iface);
103 		return;
104 	}
105 
106 	if (net_if_send_data(iface, pkt) == NET_DROP) {
107 		net_pkt_unref(pkt);
108 	}
109 }
110 
ipv4_acd_send_announcement(struct net_if_addr * ifaddr)111 static void ipv4_acd_send_announcement(struct net_if_addr *ifaddr)
112 {
113 	struct net_if *iface = net_if_get_by_index(ifaddr->ifindex);
114 	struct net_pkt *pkt;
115 
116 	pkt = ipv4_acd_prepare_arp(iface, &ifaddr->address.in_addr,
117 				   &ifaddr->address.in_addr);
118 	if (!pkt) {
119 		NET_DBG("Failed to prepare announcement %p", iface);
120 		return;
121 	}
122 
123 	if (net_if_send_data(iface, pkt) == NET_DROP) {
124 		net_pkt_unref(pkt);
125 	}
126 }
127 
acd_timer_reschedule(void)128 static void acd_timer_reschedule(void)
129 {
130 	k_timepoint_t expiry = sys_timepoint_calc(K_FOREVER);
131 	k_timeout_t timeout;
132 	sys_snode_t *node;
133 
134 	SYS_SLIST_FOR_EACH_NODE(&active_acd_timers, node) {
135 		struct net_if_addr *ifaddr =
136 			CONTAINER_OF(node, struct net_if_addr, acd_node);
137 
138 		if (sys_timepoint_cmp(ifaddr->acd_timeout, expiry) < 0) {
139 			expiry = ifaddr->acd_timeout;
140 		}
141 	}
142 
143 	timeout = sys_timepoint_timeout(expiry);
144 	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
145 		k_work_cancel_delayable(&ipv4_acd_timer);
146 		return;
147 	}
148 
149 	k_work_reschedule(&ipv4_acd_timer, timeout);
150 }
151 
/* Advance the ACD state machine for one address whose timer has expired.
 * Called from the workqueue handler with the module mutex held.
 *
 * PROBE: send up to IPV4_ACD_PROBE_NUM probes, then declare success and
 * fall through to ANNOUNCE. ANNOUNCE: send IPV4_ACD_ANNOUNCE_NUM
 * announcements, then stop tracking the address.
 */
static void ipv4_acd_manage_timeout(struct net_if_addr *ifaddr)
{
	switch (ifaddr->acd_state) {
	case IPV4_ACD_PROBE:
		if (ifaddr->acd_count < IPV4_ACD_PROBE_NUM) {
			uint32_t delay;

			NET_DBG("Sending probe for %s",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4_acd_send_probe(ifaddr);

			ifaddr->acd_count++;
			if (ifaddr->acd_count < IPV4_ACD_PROBE_NUM) {
				/* More probes to go: wait a random interval in
				 * [PROBE_MIN, PROBE_MAX) seconds (RFC 5227).
				 */
				delay = sys_rand32_get();
				delay %= MSEC_PER_SEC * (IPV4_ACD_PROBE_MAX - IPV4_ACD_PROBE_MIN);
				delay += MSEC_PER_SEC * IPV4_ACD_PROBE_MIN;
			} else {
				/* Last probe sent: wait ANNOUNCE_WAIT before
				 * the announce phase starts.
				 */
				delay = MSEC_PER_SEC * IPV4_ACD_ANNOUNCE_WAIT;

			}

			ifaddr->acd_timeout = sys_timepoint_calc(K_MSEC(delay));

			break;
		}

		/* All probes went unanswered: the address can be used. */
		net_if_ipv4_acd_succeeded(net_if_get_by_index(ifaddr->ifindex),
					  ifaddr);

		ifaddr->acd_state = IPV4_ACD_ANNOUNCE;
		ifaddr->acd_count = 0;
		__fallthrough;
	case IPV4_ACD_ANNOUNCE:
		if (ifaddr->acd_count < IPV4_ACD_ANNOUNCE_NUM) {
			NET_DBG("Sending announcement for %s",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4_acd_send_announcement(ifaddr);

			ifaddr->acd_count++;
			ifaddr->acd_timeout = sys_timepoint_calc(
					K_SECONDS(IPV4_ACD_ANNOUNCE_INTERVAL));

			break;
		}

		NET_DBG("IPv4 conflict detection done for %s",
			net_sprint_ipv4_addr(&ifaddr->address.in_addr));

		/* Timeout will be used to determine whether DEFEND_INTERVAL
		 * has expired in case of conflicts.
		 */
		ifaddr->acd_timeout = sys_timepoint_calc(K_NO_WAIT);

		/* Detection finished: stop tracking this address. */
		sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
		break;
	default:
		break;
	}
}
213 
ipv4_acd_timeout(struct k_work * work)214 static void ipv4_acd_timeout(struct k_work *work)
215 {
216 	sys_snode_t *current, *next;
217 
218 	ARG_UNUSED(work);
219 
220 	k_mutex_lock(&lock, K_FOREVER);
221 
222 	SYS_SLIST_FOR_EACH_NODE_SAFE(&active_acd_timers, current, next) {
223 		struct net_if_addr *ifaddr =
224 			CONTAINER_OF(current, struct net_if_addr, acd_node);
225 
226 		if (sys_timepoint_expired(ifaddr->acd_timeout)) {
227 			ipv4_acd_manage_timeout(ifaddr);
228 		}
229 	}
230 
231 	acd_timer_reschedule();
232 
233 	k_mutex_unlock(&lock);
234 }
235 
acd_start_timer(struct net_if * iface,struct net_if_addr * ifaddr)236 static void acd_start_timer(struct net_if *iface, struct net_if_addr *ifaddr)
237 {
238 	uint32_t delay;
239 
240 	sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
241 	sys_slist_append(&active_acd_timers, &ifaddr->acd_node);
242 
243 	if (iface->config.ip.ipv4->conflict_cnt >= IPV4_ACD_MAX_CONFLICTS) {
244 		NET_DBG("Rate limiting");
245 		delay = MSEC_PER_SEC * IPV4_ACD_RATE_LIMIT_INTERVAL;
246 	} else {
247 		/* Initial probe should be delayed by a random time interval
248 		 * between 0 and PROBE_WAIT.
249 		 */
250 		delay = sys_rand32_get() % (MSEC_PER_SEC * IPV4_ACD_PROBE_WAIT);
251 	}
252 
253 	ifaddr->acd_timeout = sys_timepoint_calc(K_MSEC(delay));
254 
255 	acd_timer_reschedule();
256 }
257 
/* Inspect an incoming ARP packet for IPv4 address conflicts.
 *
 * Two independent checks run:
 *  1. Active detection: for addresses currently in the PROBE state on
 *     this interface, a conflicting sender fails the ACD run.
 *  2. Passive detection: for addresses already in use, a conflicting
 *     sender triggers either a defensive announcement or a management
 *     event, depending on DEFEND_INTERVAL.
 *
 * Returns NET_DROP when a probe conflict consumed the packet, otherwise
 * NET_CONTINUE so normal ARP processing proceeds.
 */
enum net_verdict net_ipv4_acd_input(struct net_if *iface, struct net_pkt *pkt)
{
	sys_snode_t *current, *next;
	struct net_arp_hdr *arp_hdr;
	struct net_if_ipv4 *ipv4;

	/* Reject runts before touching any header field. */
	if (net_pkt_get_len(pkt) < sizeof(struct net_arp_hdr)) {
		NET_DBG("Invalid ARP header (len %zu, min %zu bytes)",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr));
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);

	k_mutex_lock(&lock, K_FOREVER);

	/* Active conflict detection for addresses still probing. */
	SYS_SLIST_FOR_EACH_NODE_SAFE(&active_acd_timers, current, next) {
		struct net_if_addr *ifaddr =
			CONTAINER_OF(current, struct net_if_addr, acd_node);
		struct net_if *addr_iface = net_if_get_by_index(ifaddr->ifindex);
		struct net_linkaddr *ll_addr;

		/* Only packets arriving on the probed address's interface
		 * are relevant.
		 */
		if (iface != addr_iface) {
			continue;
		}

		if (ifaddr->acd_state != IPV4_ACD_PROBE) {
			continue;
		}

		ll_addr = net_if_get_link_addr(addr_iface);

		/* RFC 5227, ch. 2.1.1 Probe Details:
		 * - ARP Request/Reply with Sender IP address match OR,
		 * - ARP Probe where Target IP address match with different sender HW address,
		 * indicate a conflict.
		 * ARP Probe has an all-zero sender IP address
		 */
		if (net_ipv4_addr_cmp_raw(arp_hdr->src_ipaddr,
					  (uint8_t *)&ifaddr->address.in_addr) ||
		    (net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
					  (uint8_t *)&ifaddr->address.in_addr) &&
				 (memcmp(&arp_hdr->src_hwaddr, ll_addr->addr, ll_addr->len) != 0) &&
				 (net_ipv4_addr_cmp_raw(arp_hdr->src_ipaddr,
						(uint8_t *)&(struct in_addr)INADDR_ANY_INIT)))) {
			NET_DBG("Conflict detected from %s for %s",
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			/* NOTE(review): iface->config.ip.ipv4 is dereferenced
			 * here before the NULL check below — presumably a
			 * probing address implies the ipv4 config exists;
			 * verify against callers.
			 */
			iface->config.ip.ipv4->conflict_cnt++;

			net_if_ipv4_acd_failed(addr_iface, ifaddr);

			k_mutex_unlock(&lock);

			return NET_DROP;
		}
	}

	k_mutex_unlock(&lock);

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 == NULL) {
		goto out;
	}

	/* Passive conflict detection - try to defend already confirmed
	 * addresses.
	 */
	ARRAY_FOR_EACH(ipv4->unicast, i) {
		struct net_if_addr *ifaddr = &ipv4->unicast[i].ipv4;
		struct net_linkaddr *ll_addr = net_if_get_link_addr(iface);

		if (!ifaddr->is_used) {
			continue;
		}

		/* Conflict: someone else claims our address with a
		 * different hardware address.
		 */
		if (net_ipv4_addr_cmp_raw(arp_hdr->src_ipaddr,
					  (uint8_t *)&ifaddr->address.in_addr) &&
		    memcmp(&arp_hdr->src_hwaddr, ll_addr->addr, ll_addr->len) != 0) {
			NET_DBG("Conflict detected from %s for %s",
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4->conflict_cnt++;

			/* In case timer has expired, we're past DEFEND_INTERVAL
			 * and can try to defend again
			 */
			if (sys_timepoint_expired(ifaddr->acd_timeout)) {
				NET_DBG("Defending address %s",
					net_sprint_ipv4_addr(&ifaddr->address.in_addr));
				ipv4_acd_send_announcement(ifaddr);
				ifaddr->acd_timeout = sys_timepoint_calc(
					K_SECONDS(IPV4_ACD_DEFEND_INTERVAL));
			} else {
				NET_DBG("Reporting conflict on %s",
					net_sprint_ipv4_addr(&ifaddr->address.in_addr));
				/* Otherwise report the conflict and let the
				 * application decide.
				 */
				net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ACD_CONFLICT, iface,
					&ifaddr->address.in_addr,
					sizeof(struct in_addr));
			}

			break;
		}
	}

out:
	return NET_CONTINUE;
}
374 
/* One-time module init: bind the shared delayable work item to its
 * timeout handler.
 */
void net_ipv4_acd_init(void)
{
	k_work_init_delayable(&ipv4_acd_timer, ipv4_acd_timeout);
}
379 
net_ipv4_acd_start(struct net_if * iface,struct net_if_addr * ifaddr)380 int net_ipv4_acd_start(struct net_if *iface, struct net_if_addr *ifaddr)
381 {
382 	/* Address conflict detection is based on ARP, so can only be done on
383 	 * supporting interfaces.
384 	 */
385 	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
386 	      net_eth_is_vlan_interface(iface))) {
387 		net_if_ipv4_acd_succeeded(iface, ifaddr);
388 		return 0;
389 	}
390 
391 	k_mutex_lock(&lock, K_FOREVER);
392 
393 	ifaddr->ifindex = net_if_get_by_iface(iface);
394 	ifaddr->acd_state = IPV4_ACD_PROBE;
395 	ifaddr->acd_count = 0;
396 
397 	acd_start_timer(iface, ifaddr);
398 
399 	k_mutex_unlock(&lock);
400 
401 	return 0;
402 }
403 
net_ipv4_acd_cancel(struct net_if * iface,struct net_if_addr * ifaddr)404 void net_ipv4_acd_cancel(struct net_if *iface, struct net_if_addr *ifaddr)
405 {
406 	/* Address conflict detection is based on ARP, so can only be done on
407 	 * supporting interfaces.
408 	 */
409 	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
410 	      net_eth_is_vlan_interface(iface))) {
411 		return;
412 	}
413 
414 	k_mutex_lock(&lock, K_FOREVER);
415 
416 	sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
417 	acd_timer_reschedule();
418 
419 	k_mutex_unlock(&lock);
420 }
421