/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(conn_mgr, CONFIG_NET_CONNECTION_MANAGER_LOG_LEVEL);

#include <errno.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_mgmt.h>
#include "conn_mgr_private.h"

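/* Per-interface state flags, shared with the conn_mgr monitor logic
 * that evaluates overall connectivity.
 */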
extern uint16_t iface_states[CONN_MGR_IFACE_MAX];

static struct net_mgmt_event_callback iface_events_cb;
static struct net_mgmt_event_callback ipv6_events_cb;
static struct net_mgmt_event_callback ipv4_events_cb;

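/* Handles NET_EVENT_IF_UP/DOWN events: updates the interface's
 * CONN_MGR_IF_UP flag and wakes the monitor thread.
 */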
static void conn_mgr_iface_events_handler(struct net_mgmt_event_callback *cb,
					  uint32_t mgmt_event,
					  struct net_if *iface)
{
	int idx;

	NET_DBG("Iface event %u received on iface %d (%p)", mgmt_event,
		net_if_get_by_iface(iface), iface);

	/* Ignore any event outside the set this callback subscribed to. */
	if ((mgmt_event & CONN_MGR_IFACE_EVENTS_MASK) != mgmt_event) {
		return;
	}

	/* iface_states is indexed by interface index minus one. */
	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IF_CMD_DOWN:
		iface_states[idx] &= ~CONN_MGR_IF_UP;
		break;
	case NET_EVENT_IF_CMD_UP:
		iface_states[idx] |= CONN_MGR_IF_UP;
		break;
	default:
		goto done;
	}

	/* Mark the iface as changed and wake the monitor thread. */
	iface_states[idx] |= CONN_MGR_IF_CHANGED;
	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}

#if defined(CONFIG_NET_IPV6)
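/* Handles NET_EVENT_IPV6_* address events: sets CONN_MGR_IF_IPV6_SET
 * only while the iface holds a preferred global IPv6 address.
 */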
static void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
					 uint32_t mgmt_event,
					 struct net_if *iface)
{
	int idx;

	NET_DBG("IPv6 event %u received on iface %d (%p)", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV6_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV6_CMD_DAD_SUCCEED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_ADD:
		/* Only a preferred global address makes the iface usable. */
		if (net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] |= CONN_MGR_IF_IPV6_SET;
		}
		break;
	case NET_EVENT_IPV6_CMD_DAD_FAILED:
		__fallthrough;
	case NET_EVENT_IPV6_CMD_ADDR_DEL:
		/* Keep the flag if another preferred global address remains. */
		if (!net_if_ipv6_get_global_addr(NET_ADDR_PREFERRED, &iface)) {
			iface_states[idx] &= ~CONN_MGR_IF_IPV6_SET;
		}
		break;
	default:
		goto done;
	}

	iface_states[idx] |= CONN_MGR_IF_CHANGED;
	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
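/* No-op stub used when IPv6 support is compiled out. */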
static inline
void conn_mgr_ipv6_events_handler(struct net_mgmt_event_callback *cb,
				  uint32_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV6 */

#if defined(CONFIG_NET_IPV4)
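/* Handles NET_EVENT_IPV4_* address events: tracks CONN_MGR_IF_IPV4_SET
 * as global IPv4 addresses are added to and removed from the iface.
 */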
static void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
					 uint32_t mgmt_event,
					 struct net_if *iface)
{
	int idx;

	NET_DBG("IPv4 event %u received on iface %d (%p)", mgmt_event,
		net_if_get_by_iface(iface), iface);

	if ((mgmt_event & CONN_MGR_IPV4_EVENTS_MASK) != mgmt_event) {
		return;
	}

	idx = net_if_get_by_iface(iface) - 1;

	NET_DBG("Iface index %d", idx);

	k_mutex_lock(&conn_mgr_mon_lock, K_FOREVER);

	switch (NET_MGMT_GET_COMMAND(mgmt_event)) {
	case NET_EVENT_IPV4_CMD_ADDR_ADD:
		iface_states[idx] |= CONN_MGR_IF_IPV4_SET;
		break;
	case NET_EVENT_IPV4_CMD_ADDR_DEL:
		/* Keep the flag if a preferred global address remains. */
		if (net_if_ipv4_get_global_addr(iface, NET_ADDR_PREFERRED)) {
			break;
		}

		iface_states[idx] &= ~CONN_MGR_IF_IPV4_SET;
		break;
	default:
		goto done;
	}

	iface_states[idx] |= CONN_MGR_IF_CHANGED;
	k_sem_give(&conn_mgr_mon_updated);

done:
	k_mutex_unlock(&conn_mgr_mon_lock);
}
#else
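/* No-op stub used when IPv4 support is compiled out. */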
static inline
void conn_mgr_ipv4_events_handler(struct net_mgmt_event_callback *cb,
				  uint32_t mgmt_event,
				  struct net_if *iface)
{
	ARG_UNUSED(cb);
	ARG_UNUSED(mgmt_event);
	ARG_UNUSED(iface);
}
#endif /* CONFIG_NET_IPV4 */

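/* Registers the iface, IPv6, and IPv4 event callbacks with the
 * net_mgmt layer so conn_mgr is notified of connectivity changes.
 */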
void conn_mgr_init_events_handler(void)
{
	net_mgmt_init_event_callback(&iface_events_cb,
				     conn_mgr_iface_events_handler,
				     CONN_MGR_IFACE_EVENTS_MASK);
	net_mgmt_add_event_callback(&iface_events_cb);

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		net_mgmt_init_event_callback(&ipv6_events_cb,
					     conn_mgr_ipv6_events_handler,
					     CONN_MGR_IPV6_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv6_events_cb);
	}

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		net_mgmt_init_event_callback(&ipv4_events_cb,
					     conn_mgr_ipv4_events_handler,
					     CONN_MGR_IPV4_EVENTS_MASK);
		net_mgmt_add_event_callback(&ipv4_events_cb);
	}
}