/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_zperf, CONFIG_NET_ZPERF_LOG_LEVEL);

#include <zephyr/kernel.h>

#include <zephyr/net/socket.h>
#include <zephyr/net/zperf.h>

#include "zperf_internal.h"

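/* Transmit buffer for the whole session: an iperf UDP datagram header,
 * followed by the client header and up to PACKET_SIZE_MAX bytes of payload.
 */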
static uint8_t sample_packet[sizeof(struct zperf_udp_datagram) +
			     sizeof(struct zperf_client_hdr_v1) +
			     PACKET_SIZE_MAX];

static struct zperf_async_upload_context udp_async_upload_ctx;

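/* Parse the server report that follows the UDP datagram header in the
 * received statistics packet and convert it to host byte order.
 */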
static inline void zperf_upload_decode_stat(const uint8_t *data,
					    size_t datalen,
					    struct zperf_results *results)
{
	struct zperf_server_hdr *stat;

	if (datalen < sizeof(struct zperf_udp_datagram) +
		      sizeof(struct zperf_server_hdr)) {
		NET_WARN("Network packet too short");
	}

	stat = (struct zperf_server_hdr *)
			(data + sizeof(struct zperf_udp_datagram));

	results->nb_packets_rcvd = ntohl(UNALIGNED_GET(&stat->datagrams));
	results->nb_packets_lost = ntohl(UNALIGNED_GET(&stat->error_cnt));
	results->nb_packets_outorder =
		ntohl(UNALIGNED_GET(&stat->outorder_cnt));
	results->total_len = (((uint64_t)ntohl(UNALIGNED_GET(&stat->total_len1))) << 32) +
		ntohl(UNALIGNED_GET(&stat->total_len2));
	results->time_in_us = ntohl(UNALIGNED_GET(&stat->stop_usec)) +
		ntohl(UNALIGNED_GET(&stat->stop_sec)) * USEC_PER_SEC;
	results->jitter_in_us = ntohl(UNALIGNED_GET(&stat->jitter2)) +
		ntohl(UNALIGNED_GET(&stat->jitter1)) * USEC_PER_SEC;
}

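/* Finish the upload session: send the last datagram with a negated sequence
 * number (the iperf FIN convention) and, unless the destination is multicast,
 * wait for the server report and decode it into results.
 */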
static inline int zperf_upload_fin(int sock,
				   uint32_t nb_packets,
				   uint64_t end_time,
				   uint32_t packet_size,
				   struct zperf_results *results,
				   bool is_mcast_pkt)
{
	uint8_t stats[sizeof(struct zperf_udp_datagram) +
		      sizeof(struct zperf_server_hdr)] = { 0 };
	struct zperf_udp_datagram *datagram;
	struct zperf_client_hdr_v1 *hdr;
	uint32_t secs = k_ticks_to_ms_ceil32(end_time) / 1000U;
	uint32_t usecs = k_ticks_to_us_ceil32(end_time) - secs * USEC_PER_SEC;
	int loop = 2;
	int ret = 0;
	struct timeval rcvtimeo = {
		.tv_sec = 2,
		.tv_usec = 0,
	};

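	/* Retry the FIN exchange at most twice: resend the final datagram
	 * if no server report arrives within the 2 second receive timeout.
	 */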
	while (ret <= 0 && loop-- > 0) {
		datagram = (struct zperf_udp_datagram *)sample_packet;

		/* Fill the packet header */
		datagram->id = htonl(-nb_packets);
		datagram->tv_sec = htonl(secs);
		datagram->tv_usec = htonl(usecs);

		hdr = (struct zperf_client_hdr_v1 *)(sample_packet +
						     sizeof(*datagram));

		/* According to the iperf documentation (in include/Settings.hpp),
		 * if flags == 0 the other header values are ignored.
		 * Even so, fill the header with meaningful values.
		 */
		hdr->flags = 0;
		hdr->num_of_threads = htonl(1);
		hdr->port = 0;
		hdr->buffer_len = sizeof(sample_packet) -
			sizeof(*datagram) - sizeof(*hdr);
		hdr->bandwidth = 0;
		hdr->num_of_bytes = htonl(packet_size);

		/* Send the packet */
		ret = zsock_send(sock, sample_packet, packet_size, 0);
		if (ret < 0) {
			NET_ERR("Failed to send the packet (%d)", errno);
			continue;
		}

		/* For multicast, only the negative sequence number packet is
		 * sent; we don't wait for a server ack.
		 */
		if (!is_mcast_pkt) {
			/* Receive statistics */
			ret = zsock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &rcvtimeo,
					       sizeof(rcvtimeo));
			if (ret < 0) {
				NET_ERR("setsockopt error (%d)", errno);
				continue;
			}

			ret = zsock_recv(sock, stats, sizeof(stats), 0);
			if (ret == -EAGAIN) {
				NET_WARN("Stats receive timeout");
			} else if (ret < 0) {
				NET_ERR("Failed to receive packet (%d)", errno);
			}
		}
	}

	/* Decode statistics */
	if (ret > 0) {
		zperf_upload_decode_stat(stats, ret, results);
	} else {
		return ret;
	}

	/* Drain RX */
	while (true) {
		ret = zsock_recv(sock, stats, sizeof(stats), ZSOCK_MSG_DONTWAIT);
		if (ret < 0) {
			break;
		}

		NET_WARN("Drain one spurious stat packet!");
	}

	return 0;
}

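/* Run the upload loop: send fixed-size datagrams for the requested duration,
 * pacing transmissions to match the requested rate, then perform the FIN
 * exchange and fill in the client side statistics.
 */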
static int udp_upload(int sock, int port,
		      const struct zperf_upload_params *param,
		      struct zperf_results *results)
{
	uint32_t duration_in_ms = param->duration_ms;
	uint32_t packet_size = param->packet_size;
	uint32_t rate_in_kbps = param->rate_kbps;
	uint32_t packet_duration_us = zperf_packet_duration(packet_size, rate_in_kbps);
	uint32_t packet_duration = k_us_to_ticks_ceil32(packet_duration_us);
	uint32_t delay = packet_duration;
	uint32_t nb_packets = 0U;
	int64_t start_time, end_time;
	int64_t print_time, last_loop_time;
	uint32_t print_period;
	bool is_mcast_pkt = false;
	int ret;

	if (packet_size > PACKET_SIZE_MAX) {
		NET_WARN("Packet size too large! max size: %u",
			 PACKET_SIZE_MAX);
		packet_size = PACKET_SIZE_MAX;
	} else if (packet_size < sizeof(struct zperf_udp_datagram)) {
		NET_WARN("Packet size set to the min size: %zu",
			 sizeof(struct zperf_udp_datagram));
		packet_size = sizeof(struct zperf_udp_datagram);
	}

	/* Start the loop */
	start_time = k_uptime_ticks();
	last_loop_time = start_time;
	end_time = start_time + k_ms_to_ticks_ceil64(duration_in_ms);

	/* Print a log line every second */
	print_period = k_ms_to_ticks_ceil32(MSEC_PER_SEC);
	print_time = start_time + print_period;

	(void)memset(sample_packet, 'z', sizeof(sample_packet));

	do {
		struct zperf_udp_datagram *datagram;
		struct zperf_client_hdr_v1 *hdr;
		uint64_t usecs64;
		uint32_t secs, usecs;
		int64_t loop_time;
		int32_t adjust;

		/* Timestamp */
		loop_time = k_uptime_ticks();

		/* Algorithm to maintain a given baud rate */
		if (last_loop_time != loop_time) {
			adjust = packet_duration;
			adjust -= (int32_t)(loop_time - last_loop_time);
		} else {
			/* It's the first iteration so no need for adjustment
			 */
			adjust = 0;
		}

		if ((adjust >= 0) || (-adjust < delay)) {
			delay += adjust;
		} else {
			delay = 0U; /* delay should never be negative */
		}

		last_loop_time = loop_time;

		usecs64 = k_ticks_to_us_floor64(loop_time);
		secs = usecs64 / USEC_PER_SEC;
		usecs = usecs64 - (uint64_t)secs * USEC_PER_SEC;

		/* Fill the packet header */
		datagram = (struct zperf_udp_datagram *)sample_packet;

		datagram->id = htonl(nb_packets);
		datagram->tv_sec = htonl(secs);
		datagram->tv_usec = htonl(usecs);

		hdr = (struct zperf_client_hdr_v1 *)(sample_packet +
						     sizeof(*datagram));
		hdr->flags = 0;
		hdr->num_of_threads = htonl(1);
		hdr->port = htonl(port);
		hdr->buffer_len = sizeof(sample_packet) -
			sizeof(*datagram) - sizeof(*hdr);
		hdr->bandwidth = htonl(rate_in_kbps);
		hdr->num_of_bytes = htonl(packet_size);

		/* Send the packet */
		ret = zsock_send(sock, sample_packet, packet_size, 0);
		if (ret < 0) {
			NET_ERR("Failed to send the packet (%d)", errno);
			return -errno;
		} else {
			nb_packets++;
		}

		if (IS_ENABLED(CONFIG_NET_ZPERF_LOG_LEVEL_DBG)) {
			if (print_time >= loop_time) {
				NET_DBG("nb_packets=%u\tdelay=%u\tadjust=%d",
					nb_packets, (unsigned int)delay,
					(int)adjust);
				print_time += print_period;
			}
		}

		/* Wait */
#if defined(CONFIG_ARCH_POSIX)
		k_busy_wait(USEC_PER_MSEC);
#else
		if (delay != 0) {
			k_sleep(K_TICKS(delay));
		}
#endif
	} while (last_loop_time < end_time);

	end_time = k_uptime_ticks();

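	/* Multicast destinations do not return a server report, so tell
	 * the FIN exchange not to wait for one.
	 */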
	if (param->peer_addr.sa_family == AF_INET) {
		if (net_ipv4_is_addr_mcast(&net_sin(&param->peer_addr)->sin_addr)) {
			is_mcast_pkt = true;
		}
	} else if (param->peer_addr.sa_family == AF_INET6) {
		if (net_ipv6_is_addr_mcast(&net_sin6(&param->peer_addr)->sin6_addr)) {
			is_mcast_pkt = true;
		}
	} else {
		return -EINVAL;
	}

	ret = zperf_upload_fin(sock, nb_packets, end_time, packet_size,
			       results, is_mcast_pkt);
	if (ret < 0) {
		return ret;
	}

	/* Add result coming from the client */
	results->nb_packets_sent = nb_packets;
	results->client_time_in_us =
				k_ticks_to_us_ceil64(end_time - start_time);
	results->packet_size = packet_size;

	return 0;
}

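/* Blocking entry point for a UDP upload session: resolve the destination
 * port, prepare the upload socket (optionally bound to a given interface),
 * run the upload and close the socket.
 *
 * A minimal usage sketch, with purely illustrative values (the peer address
 * setup is abbreviated):
 *
 *   struct zperf_upload_params param = { 0 };
 *   struct zperf_results res;
 *
 *   param.duration_ms = 10 * MSEC_PER_SEC;
 *   param.rate_kbps = 1000;
 *   param.packet_size = 512;
 *   net_sin(&param.peer_addr)->sin_family = AF_INET;
 *   net_sin(&param.peer_addr)->sin_port = htons(5001);
 *   (fill in net_sin(&param.peer_addr)->sin_addr with the server address)
 *
 *   if (zperf_udp_upload(&param, &res) == 0) {
 *           (res now holds both client and server statistics)
 *   }
 */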
int zperf_udp_upload(const struct zperf_upload_params *param,
		     struct zperf_results *result)
{
	int port = 0;
	int sock;
	int ret;
	struct ifreq req;

	if (param == NULL || result == NULL) {
		return -EINVAL;
	}

	if (param->peer_addr.sa_family == AF_INET) {
		port = ntohs(net_sin(&param->peer_addr)->sin_port);
	} else if (param->peer_addr.sa_family == AF_INET6) {
		port = ntohs(net_sin6(&param->peer_addr)->sin6_port);
	} else {
		NET_ERR("Invalid address family (%d)",
			param->peer_addr.sa_family);
		return -EINVAL;
	}

	sock = zperf_prepare_upload_sock(&param->peer_addr, param->options.tos,
					 param->options.priority, 0, IPPROTO_UDP);
	if (sock < 0) {
		return sock;
	}

	if (param->if_name[0]) {
		(void)memset(req.ifr_name, 0, sizeof(req.ifr_name));
		strncpy(req.ifr_name, param->if_name, IFNAMSIZ);
		req.ifr_name[IFNAMSIZ - 1] = 0;

		if (zsock_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, &req,
				     sizeof(struct ifreq)) != 0) {
			NET_WARN("setsockopt SO_BINDTODEVICE error (%d)", -errno);
		}
	}

	ret = udp_upload(sock, port, param, result);

	zsock_close(sock);

	return ret;
}

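/* Work handler for the asynchronous API: run the blocking upload from the
 * zperf work queue context and report progress through the user callback.
 */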
static void udp_upload_async_work(struct k_work *work)
{
	struct zperf_async_upload_context *upload_ctx =
		&udp_async_upload_ctx;
	struct zperf_results result;
	int ret;

	upload_ctx->callback(ZPERF_SESSION_STARTED, NULL,
			     upload_ctx->user_data);

	ret = zperf_udp_upload(&upload_ctx->param, &result);
	if (ret < 0) {
		upload_ctx->callback(ZPERF_SESSION_ERROR, NULL,
				     upload_ctx->user_data);
	} else {
		upload_ctx->callback(ZPERF_SESSION_FINISHED, &result,
				     upload_ctx->user_data);
	}
}

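/* Non-blocking variant of zperf_udp_upload(): the parameters are copied and
 * the upload runs from the zperf work queue, with results delivered via the
 * supplied callback. Only one asynchronous upload can be pending at a time.
 */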
int zperf_udp_upload_async(const struct zperf_upload_params *param,
			   zperf_callback callback, void *user_data)
{
	if (param == NULL || callback == NULL) {
		return -EINVAL;
	}

	if (k_work_is_pending(&udp_async_upload_ctx.work)) {
		return -EBUSY;
	}

	memcpy(&udp_async_upload_ctx.param, param, sizeof(*param));
	udp_async_upload_ctx.callback = callback;
	udp_async_upload_ctx.user_data = user_data;

	zperf_async_work_submit(&udp_async_upload_ctx.work);

	return 0;
}

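/* One-time initialization of the asynchronous upload work item. */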
void zperf_udp_uploader_init(void)
{
	k_work_init(&udp_async_upload_ctx.work, udp_upload_async_work);
}