/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Object core support for sockets */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL);

#include <zephyr/kernel.h>

#include "sockets_internal.h"
#include "../../ip/net_private.h"

static struct k_obj_type sock_obj_type;
static K_MUTEX_DEFINE(sock_obj_mutex);

/* Allocate some extra socket objects so that we can track
 * closed sockets and get some historical statistics.
 */
static struct sock_obj sock_objects[CONFIG_ZVFS_OPEN_MAX * 2] = {
	[0 ... ((CONFIG_ZVFS_OPEN_MAX * 2) - 1)] = {
		.fd = -1,
		.init_done = false,
	}
};

static void sock_obj_core_init_and_link(struct sock_obj *sock);
static int sock_obj_core_stats_reset(struct k_obj_core *obj);
static int sock_obj_stats_raw(struct k_obj_core *obj_core, void *stats);
static int sock_obj_core_get_reg_and_proto(int sock,
					   struct net_socket_register **reg);

struct k_obj_core_stats_desc sock_obj_type_stats_desc = {
	.raw_size = sizeof(struct sock_obj_type_raw_stats),
	.raw = sock_obj_stats_raw,
	.reset = sock_obj_core_stats_reset,
	.disable = NULL,    /* Stats gathering is always on */
	.enable = NULL,     /* Stats gathering is always on */
};

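/* Record the identity of a newly allocated socket: file descriptor,
 * address family/type/protocol, owning registration entry, creating
 * thread and creation time in kernel ticks.
 */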
static void set_fields(struct sock_obj *obj, int fd,
		       struct net_socket_register *reg,
		       int family, int type, int proto)
{
	obj->fd = fd;
	obj->socket_family = family;
	obj->socket_type = type;
	obj->socket_proto = proto;
	obj->reg = reg;
	obj->creator = k_current_get();
	obj->create_time = sys_clock_tick_get();
}

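/* Link the socket to the kernel object core: initialize the socket object
 * type on first use, link this object to it and register its raw statistics
 * buffer. The statistics are cleared when a slot is reused for a new socket.
 */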
static void sock_obj_core_init_and_link(struct sock_obj *sock)
{
	static bool type_init_done;

	if (!type_init_done) {
		z_obj_type_init(&sock_obj_type, K_OBJ_TYPE_SOCK,
				offsetof(struct sock_obj, obj_core));
		k_obj_type_stats_init(&sock_obj_type, &sock_obj_type_stats_desc);

		type_init_done = true;
	}

	k_obj_core_init_and_link(K_OBJ_CORE(sock), &sock_obj_type);
	k_obj_core_stats_register(K_OBJ_CORE(sock), &sock->stats,
				  sizeof(struct sock_obj_type_raw_stats));

	/* If the socket was closed and we re-opened it again, then clear
	 * the statistics.
	 */
	if (sock->init_done) {
		k_obj_core_stats_reset(K_OBJ_CORE(sock));
	}

	sock->init_done = true;
}

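/* Object core statistics callbacks: copy out and clear the raw per-socket
 * statistics stored behind the object core pointer.
 */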
static int sock_obj_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	memcpy(stats, obj_core->stats, sizeof(struct sock_obj_type_raw_stats));

	return 0;
}

static int sock_obj_core_stats_reset(struct k_obj_core *obj_core)
{
	memset(obj_core->stats, 0, sizeof(struct sock_obj_type_raw_stats));

	return 0;
}

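/* Look up a tracked socket by file descriptor. On success the registration
 * entry is stored in *reg and the socket protocol is returned, otherwise
 * -ENOENT is returned.
 */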
static int sock_obj_core_get_reg_and_proto(int sock, struct net_socket_register **reg)
{
	int i, ret;

	k_mutex_lock(&sock_obj_mutex, K_FOREVER);

	for (i = 0; i < ARRAY_SIZE(sock_objects); i++) {
		if (sock_objects[i].fd == sock) {
			*reg = sock_objects[i].reg;
			ret = sock_objects[i].socket_proto;
			goto out;
		}
	}

	ret = -ENOENT;

out:
	k_mutex_unlock(&sock_obj_mutex);

	return ret;
}

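/* Bind a free socket object slot to a newly created socket. Slots that have
 * never been used are preferred over slots of already closed sockets so that
 * the historical data is kept around as long as possible.
 */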
int sock_obj_core_alloc(int sock, struct net_socket_register *reg,
			int family, int type, int proto)
{
	struct sock_obj *obj = NULL;
	int ret, i;

	if (sock < 0) {
		return -EINVAL;
	}

	k_mutex_lock(&sock_obj_mutex, K_FOREVER);

	/* Try not to allocate already closed sockets so that we
	 * can see historical data.
	 */
	for (i = 0; i < ARRAY_SIZE(sock_objects); i++) {
		if (sock_objects[i].fd < 0) {
			if (sock_objects[i].init_done == false) {
				obj = &sock_objects[i];
				break;
			} else if (obj == NULL) {
				obj = &sock_objects[i];
			}
		}
	}

	if (obj == NULL) {
		ret = -ENOENT;
		goto out;
	}

	set_fields(obj, sock, reg, family, type, proto);
	sock_obj_core_init_and_link(obj);

	ret = 0;

out:
	k_mutex_unlock(&sock_obj_mutex);

	return ret;
}

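/* Allocate a core object for a socket derived from an existing one (for
 * example a connection returned by accept()). The registration entry and
 * protocol are inherited from the original socket and the address family is
 * queried via the SO_DOMAIN socket option.
 */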
int sock_obj_core_alloc_find(int sock, int new_sock, int type)
{
	struct net_socket_register *reg = NULL;
	socklen_t optlen = sizeof(int);
	int family;
	int proto;
	int ret;

	if (new_sock < 0) {
		return -EINVAL;
	}

	ret = sock_obj_core_get_reg_and_proto(sock, &reg);
	if (ret < 0) {
		goto out;
	}

	/* The lookup returns the protocol of the original socket */
	proto = ret;

	ret = zsock_getsockopt(sock, SOL_SOCKET, SO_DOMAIN, &family, &optlen);
	if (ret < 0) {
		NET_ERR("Cannot get socket domain (%d)", -errno);
		goto out;
	}

	ret = sock_obj_core_alloc(new_sock, reg, family, type, proto);
	if (ret < 0) {
		NET_ERR("Cannot allocate core object for socket %d (%d)",
			new_sock, ret);
	}

out:
	return ret;
}

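/* Mark a tracked socket as closed. The slot is kept so that net-shell can
 * still show the closed socket; create_time is repurposed to hold the socket
 * lifetime in milliseconds. Returns 0 on success or -ENOENT if the file
 * descriptor is not tracked.
 */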
int sock_obj_core_dealloc(int fd)
{
	int ret;

	k_mutex_lock(&sock_obj_mutex, K_FOREVER);

	for (int i = 0; i < ARRAY_SIZE(sock_objects); i++) {
		if (sock_objects[i].fd == fd) {
			sock_objects[i].fd = -1;

			/* Calculate the lifetime of the socket so that
			 * net-shell can print it for the closed sockets.
			 */
			sock_objects[i].create_time =
				k_ticks_to_ms_ceil32(sys_clock_tick_get() -
						     sock_objects[i].create_time);
			ret = 0;
			goto out;
		}
	}

	ret = -ENOENT;

out:
	k_mutex_unlock(&sock_obj_mutex);

	return ret;
}

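/* Add successfully sent bytes to the per-socket statistics. */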
void sock_obj_core_update_send_stats(int fd, int bytes)
{
	if (bytes > 0) {
		k_mutex_lock(&sock_obj_mutex, K_FOREVER);

		for (int i = 0; i < ARRAY_SIZE(sock_objects); i++) {
			if (sock_objects[i].fd == fd) {
				sock_objects[i].stats.sent += bytes;
				break;
			}
		}

		k_mutex_unlock(&sock_obj_mutex);
	}
}

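/* Add successfully received bytes to the per-socket statistics. */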
void sock_obj_core_update_recv_stats(int fd, int bytes)
{
	if (bytes > 0) {
		k_mutex_lock(&sock_obj_mutex, K_FOREVER);

		for (int i = 0; i < ARRAY_SIZE(sock_objects); i++) {
			if (sock_objects[i].fd == fd) {
				sock_objects[i].stats.received += bytes;
				break;
			}
		}

		k_mutex_unlock(&sock_obj_mutex);
	}
}