/*
 * Copyright (c) 2017 Matthias Boesl
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/** @file
 * @brief IPv4 address conflict detection
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_ipv4_acd, CONFIG_NET_IPV4_ACD_LOG_LEVEL);

#include <zephyr/net/ethernet.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_l2.h>
#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/random/random.h>
#include <zephyr/sys/slist.h>

#include "ipv4.h"
#include "net_private.h"
#include "../l2/ethernet/arp.h"

static K_MUTEX_DEFINE(lock);

/* Address conflict detection timer. */
static struct k_work_delayable ipv4_acd_timer;

/* List of IPv4 addresses under an active conflict detection. */
static sys_slist_t active_acd_timers;

#define BUF_ALLOC_TIMEOUT K_MSEC(100)

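/* The timing constants below follow the values listed in RFC 5227,
 * section 1.1. Delays and intervals are expressed in seconds; the
 * *_NUM and *_CONFLICTS values are plain counts.
 */
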
/* Initial random delay */
#define IPV4_ACD_PROBE_WAIT 1

/* Number of probe packets */
#define IPV4_ACD_PROBE_NUM 3

/* Minimum delay till repeated probe */
#define IPV4_ACD_PROBE_MIN 1

/* Maximum delay till repeated probe */
#define IPV4_ACD_PROBE_MAX 2

/* Delay before announcing */
#define IPV4_ACD_ANNOUNCE_WAIT 2

/* Number of announcement packets */
#define IPV4_ACD_ANNOUNCE_NUM 2

/* Time between announcement packets */
#define IPV4_ACD_ANNOUNCE_INTERVAL 2

/* Max conflicts before rate limiting */
#define IPV4_ACD_MAX_CONFLICTS 10

/* Delay between successive attempts */
#define IPV4_ACD_RATE_LIMIT_INTERVAL 60

/* Minimum interval between defensive ARPs */
#define IPV4_ACD_DEFEND_INTERVAL 10

enum ipv4_acd_state {
	IPV4_ACD_PROBE,    /* Probing state */
	IPV4_ACD_ANNOUNCE, /* Announce state */
};

static struct net_pkt *ipv4_acd_prepare_arp(struct net_if *iface,
					    struct in_addr *sender_ip,
					    struct in_addr *target_ip)
{
	struct net_pkt *pkt;

	/* We provide AF_UNSPEC to the allocator: this packet does not
	 * need space for any IPv4 header.
	 */
	pkt = net_pkt_alloc_with_buffer(iface, sizeof(struct net_arp_hdr),
					AF_UNSPEC, 0, BUF_ALLOC_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	net_pkt_set_family(pkt, AF_INET);
	net_pkt_set_ipv4_acd(pkt, true);

	return net_arp_prepare(pkt, target_ip, sender_ip);
}

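/* Send an ARP probe for the address under conflict detection.
 * Per RFC 5227, section 2.1.1, a probe carries an all-zero sender IP
 * address and the probed address as the target IP address.
 */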
static void ipv4_acd_send_probe(struct net_if_addr *ifaddr)
{
	struct net_if *iface = net_if_get_by_index(ifaddr->ifindex);
	struct in_addr unspecified = { 0 };
	struct net_pkt *pkt;

	pkt = ipv4_acd_prepare_arp(iface, &unspecified, &ifaddr->address.in_addr);
	if (!pkt) {
		NET_DBG("Failed to prepare probe %p", iface);
		return;
	}

	if (net_if_send_data(iface, pkt) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

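/* Send an ARP announcement for the address. Per RFC 5227, section 2.3,
 * an announcement is an ARP request with both the sender and the target
 * IP address set to the address being claimed or defended.
 */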
static void ipv4_acd_send_announcement(struct net_if_addr *ifaddr)
{
	struct net_if *iface = net_if_get_by_index(ifaddr->ifindex);
	struct net_pkt *pkt;

	pkt = ipv4_acd_prepare_arp(iface, &ifaddr->address.in_addr,
				   &ifaddr->address.in_addr);
	if (!pkt) {
		NET_DBG("Failed to prepare announcement %p", iface);
		return;
	}

	if (net_if_send_data(iface, pkt) == NET_DROP) {
		net_pkt_unref(pkt);
	}
}

static void acd_timer_reschedule(void)
{
	k_timepoint_t expiry = sys_timepoint_calc(K_FOREVER);
	k_timeout_t timeout;
	sys_snode_t *node;

	SYS_SLIST_FOR_EACH_NODE(&active_acd_timers, node) {
		struct net_if_addr *ifaddr =
			CONTAINER_OF(node, struct net_if_addr, acd_node);

		if (sys_timepoint_cmp(ifaddr->acd_timeout, expiry) < 0) {
			expiry = ifaddr->acd_timeout;
		}
	}

	timeout = sys_timepoint_timeout(expiry);
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		k_work_cancel_delayable(&ipv4_acd_timer);
		return;
	}

	k_work_reschedule(&ipv4_acd_timer, timeout);
}

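/* Advance the ACD state machine for an address whose timeout has expired.
 * In the PROBE state up to IPV4_ACD_PROBE_NUM probes are sent, spaced by
 * a random delay between PROBE_MIN and PROBE_MAX seconds. Once all probes
 * have been sent and ANNOUNCE_WAIT seconds have passed without a conflict,
 * the address is reported as usable and the state machine falls through to
 * ANNOUNCE, which sends IPV4_ACD_ANNOUNCE_NUM announcements
 * ANNOUNCE_INTERVAL seconds apart before removing the address from the
 * active timer list.
 */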
static void ipv4_acd_manage_timeout(struct net_if_addr *ifaddr)
{
	switch (ifaddr->acd_state) {
	case IPV4_ACD_PROBE:
		if (ifaddr->acd_count < IPV4_ACD_PROBE_NUM) {
			uint32_t delay;

			NET_DBG("Sending probe for %s",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4_acd_send_probe(ifaddr);

			ifaddr->acd_count++;
			if (ifaddr->acd_count < IPV4_ACD_PROBE_NUM) {
				delay = sys_rand32_get();
				delay %= MSEC_PER_SEC * (IPV4_ACD_PROBE_MAX - IPV4_ACD_PROBE_MIN);
				delay += MSEC_PER_SEC * IPV4_ACD_PROBE_MIN;
			} else {
				delay = MSEC_PER_SEC * IPV4_ACD_ANNOUNCE_WAIT;
			}

			ifaddr->acd_timeout = sys_timepoint_calc(K_MSEC(delay));

			break;
		}

		net_if_ipv4_acd_succeeded(net_if_get_by_index(ifaddr->ifindex),
					  ifaddr);

		ifaddr->acd_state = IPV4_ACD_ANNOUNCE;
		ifaddr->acd_count = 0;
		__fallthrough;
	case IPV4_ACD_ANNOUNCE:
		if (ifaddr->acd_count < IPV4_ACD_ANNOUNCE_NUM) {
			NET_DBG("Sending announcement for %s",
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4_acd_send_announcement(ifaddr);

			ifaddr->acd_count++;
			ifaddr->acd_timeout = sys_timepoint_calc(
				K_SECONDS(IPV4_ACD_ANNOUNCE_INTERVAL));

			break;
		}

		NET_DBG("IPv4 conflict detection done for %s",
			net_sprint_ipv4_addr(&ifaddr->address.in_addr));

		/* Timeout will be used to determine whether DEFEND_INTERVAL
		 * has expired in case of conflicts.
		 */
		ifaddr->acd_timeout = sys_timepoint_calc(K_NO_WAIT);

		sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
		break;
	default:
		break;
	}
}

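/* Delayable work handler: walk all addresses with an active ACD timer,
 * advance the state machine for those whose timeout has expired, and
 * reschedule the work item for the nearest remaining deadline.
 */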
static void ipv4_acd_timeout(struct k_work *work)
{
	sys_snode_t *current, *next;

	ARG_UNUSED(work);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&active_acd_timers, current, next) {
		struct net_if_addr *ifaddr =
			CONTAINER_OF(current, struct net_if_addr, acd_node);

		if (sys_timepoint_expired(ifaddr->acd_timeout)) {
			ipv4_acd_manage_timeout(ifaddr);
		}
	}

	acd_timer_reschedule();

	k_mutex_unlock(&lock);
}

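/* Queue an address for conflict detection. The first probe is delayed by
 * a random amount between 0 and PROBE_WAIT seconds, or by
 * RATE_LIMIT_INTERVAL seconds when the interface has already seen
 * IPV4_ACD_MAX_CONFLICTS or more conflicts (rate limiting).
 */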
static void acd_start_timer(struct net_if *iface, struct net_if_addr *ifaddr)
{
	uint32_t delay;

	sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
	sys_slist_append(&active_acd_timers, &ifaddr->acd_node);

	if (iface->config.ip.ipv4->conflict_cnt >= IPV4_ACD_MAX_CONFLICTS) {
		NET_DBG("Rate limiting");
		delay = MSEC_PER_SEC * IPV4_ACD_RATE_LIMIT_INTERVAL;
	} else {
		/* Initial probe should be delayed by a random time interval
		 * between 0 and PROBE_WAIT.
		 */
		delay = sys_rand32_get() % (MSEC_PER_SEC * IPV4_ACD_PROBE_WAIT);
	}

	ifaddr->acd_timeout = sys_timepoint_calc(K_MSEC(delay));

	acd_timer_reschedule();
}

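/* Inspect an incoming ARP packet for address conflicts. A conflicting ARP
 * seen while an address is still being probed fails the ACD run and drops
 * the packet. For addresses already in use, a conflict is either defended
 * with an announcement (if DEFEND_INTERVAL has elapsed since the last
 * defence) or reported via the NET_EVENT_IPV4_ACD_CONFLICT management
 * event; the packet then continues through normal ARP processing.
 */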
enum net_verdict net_ipv4_acd_input(struct net_if *iface, struct net_pkt *pkt)
{
	sys_snode_t *current, *next;
	struct net_arp_hdr *arp_hdr;
	struct net_if_ipv4 *ipv4;

	if (net_pkt_get_len(pkt) < sizeof(struct net_arp_hdr)) {
		NET_DBG("Invalid ARP header (len %zu, min %zu bytes)",
			net_pkt_get_len(pkt), sizeof(struct net_arp_hdr));
		return NET_DROP;
	}

	arp_hdr = NET_ARP_HDR(pkt);

	k_mutex_lock(&lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_NODE_SAFE(&active_acd_timers, current, next) {
		struct net_if_addr *ifaddr =
			CONTAINER_OF(current, struct net_if_addr, acd_node);
		struct net_if *addr_iface = net_if_get_by_index(ifaddr->ifindex);
		struct net_linkaddr *ll_addr;

		if (iface != addr_iface) {
			continue;
		}

		if (ifaddr->acd_state != IPV4_ACD_PROBE) {
			continue;
		}

		ll_addr = net_if_get_link_addr(addr_iface);

		/* RFC 5227, ch. 2.1.1 Probe Details:
		 * - Sender IP address match OR,
		 * - Target IP address match with different sender HW address,
		 * indicate a conflict.
		 */
		if (net_ipv4_addr_cmp_raw(arp_hdr->src_ipaddr,
					  (uint8_t *)&ifaddr->address.in_addr) ||
		    (net_ipv4_addr_cmp_raw(arp_hdr->dst_ipaddr,
					   (uint8_t *)&ifaddr->address.in_addr) &&
		     memcmp(&arp_hdr->src_hwaddr, ll_addr->addr, ll_addr->len) != 0)) {
			NET_DBG("Conflict detected from %s for %s",
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			iface->config.ip.ipv4->conflict_cnt++;

			net_if_ipv4_acd_failed(addr_iface, ifaddr);

			k_mutex_unlock(&lock);

			return NET_DROP;
		}
	}

	k_mutex_unlock(&lock);

	ipv4 = iface->config.ip.ipv4;
	if (ipv4 == NULL) {
		goto out;
	}

	/* Passive conflict detection - try to defend already confirmed
	 * addresses.
	 */
	ARRAY_FOR_EACH(ipv4->unicast, i) {
		struct net_if_addr *ifaddr = &ipv4->unicast[i].ipv4;
		struct net_linkaddr *ll_addr = net_if_get_link_addr(iface);

		if (!ifaddr->is_used) {
			continue;
		}

		if (net_ipv4_addr_cmp_raw(arp_hdr->src_ipaddr,
					  (uint8_t *)&ifaddr->address.in_addr) &&
		    memcmp(&arp_hdr->src_hwaddr, ll_addr->addr, ll_addr->len) != 0) {
			NET_DBG("Conflict detected from %s for %s",
				net_sprint_ll_addr((uint8_t *)&arp_hdr->src_hwaddr,
						   arp_hdr->hwlen),
				net_sprint_ipv4_addr(&ifaddr->address.in_addr));

			ipv4->conflict_cnt++;

			/* In case timer has expired, we're past DEFEND_INTERVAL
			 * and can try to defend again
			 */
			if (sys_timepoint_expired(ifaddr->acd_timeout)) {
				NET_DBG("Defending address %s",
					net_sprint_ipv4_addr(&ifaddr->address.in_addr));
				ipv4_acd_send_announcement(ifaddr);
				ifaddr->acd_timeout = sys_timepoint_calc(
					K_SECONDS(IPV4_ACD_DEFEND_INTERVAL));
			} else {
				NET_DBG("Reporting conflict on %s",
					net_sprint_ipv4_addr(&ifaddr->address.in_addr));
				/* Otherwise report the conflict and let the
				 * application decide.
				 */
				net_mgmt_event_notify_with_info(
					NET_EVENT_IPV4_ACD_CONFLICT, iface,
					&ifaddr->address.in_addr,
					sizeof(struct in_addr));
			}

			break;
		}
	}

out:
	return NET_CONTINUE;
}

void net_ipv4_acd_init(void)
{
	k_work_init_delayable(&ipv4_acd_timer, ipv4_acd_timeout);
}

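/* Start conflict detection for an IPv4 address. On interfaces without ARP
 * support (anything other than Ethernet or a VLAN interface on top of it),
 * the address is immediately reported as usable instead.
 */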
int net_ipv4_acd_start(struct net_if *iface, struct net_if_addr *ifaddr)
{
	/* Address conflict detection is based on ARP, so can only be done on
	 * supporting interfaces.
	 */
	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		net_if_ipv4_acd_succeeded(iface, ifaddr);
		return 0;
	}

	k_mutex_lock(&lock, K_FOREVER);

	ifaddr->ifindex = net_if_get_by_iface(iface);
	ifaddr->acd_state = IPV4_ACD_PROBE;
	ifaddr->acd_count = 0;

	acd_start_timer(iface, ifaddr);

	k_mutex_unlock(&lock);

	return 0;
}

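/* Stop any in-progress conflict detection for the given address and
 * reschedule the shared ACD timer accordingly.
 */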
void net_ipv4_acd_cancel(struct net_if *iface, struct net_if_addr *ifaddr)
{
	/* Address conflict detection is based on ARP, so can only be done on
	 * supporting interfaces.
	 */
	if (!(net_if_l2(iface) == &NET_L2_GET_NAME(ETHERNET) ||
	      net_eth_is_vlan_interface(iface))) {
		return;
	}

	k_mutex_lock(&lock, K_FOREVER);

	sys_slist_find_and_remove(&active_acd_timers, &ifaddr->acd_node);
	acd_timer_reschedule();

	k_mutex_unlock(&lock);
}