/*
 * Copyright (c) 2017 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_gptp, CONFIG_NET_GPTP_LOG_LEVEL);

#include <zephyr/net/net_pkt.h>
#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/net/ethernet_mgmt.h>
#include <zephyr/random/rand32.h>

#include <zephyr/net/gptp.h>

#include "gptp_messages.h"
#include "gptp_mi.h"
#include "gptp_data_set.h"

#include "gptp_private.h"

#define NET_GPTP_STACK_SIZE 2048

#if CONFIG_NET_GPTP_NUM_PORTS > 32
/*
 * Boolean array sizes have been hardcoded.
 * It has been arbitrarily chosen that a system cannot
 * have more than 32 ports.
 */
#error Maximum number of ports exceeded. (Max is 32).
#endif

K_KERNEL_STACK_DEFINE(gptp_stack, NET_GPTP_STACK_SIZE);
K_FIFO_DEFINE(gptp_rx_queue);

static k_tid_t tid;
static struct k_thread gptp_thread_data;
struct gptp_domain gptp_domain;

int gptp_get_port_number(struct net_if *iface)
{
	int port = net_eth_get_ptp_port(iface) + 1;

	if (port >= GPTP_PORT_START && port < GPTP_PORT_END) {
		return port;
	}

	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		if (GPTP_PORT_IFACE(port) == iface) {
			return port;
		}
	}

	return -ENODEV;
}

bool gptp_is_slave_port(int port)
{
	return (GPTP_GLOBAL_DS()->selected_role[port] == GPTP_PORT_SLAVE);
}

/*
 * Use the given port to generate the clock identity
 * for the device.
 * The clock identity is unique for one time-aware system.
 */
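/*
 * For example, an interface MAC address of aa:bb:cc:dd:ee:ff yields the
 * EUI-64 style clock identity aa:bb:cc:FF:FE:dd:ee:ff (the first three
 * MAC octets, the 0xFFFE filler, then the last three MAC octets).
 */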
static void gptp_compute_clock_identity(int port)
{
	struct net_if *iface = GPTP_PORT_IFACE(port);
	struct gptp_default_ds *default_ds;

	default_ds = GPTP_DEFAULT_DS();

	if (iface) {
		default_ds->clk_id[0] = net_if_get_link_addr(iface)->addr[0];
		default_ds->clk_id[1] = net_if_get_link_addr(iface)->addr[1];
		default_ds->clk_id[2] = net_if_get_link_addr(iface)->addr[2];
		default_ds->clk_id[3] = 0xFF;
		default_ds->clk_id[4] = 0xFE;
		default_ds->clk_id[5] = net_if_get_link_addr(iface)->addr[3];
		default_ds->clk_id[6] = net_if_get_link_addr(iface)->addr[4];
		default_ds->clk_id[7] = net_if_get_link_addr(iface)->addr[5];
	}
}

#define PRINT_INFO(msg, hdr, pkt)				\
	NET_DBG("Received %s seq %d pkt %p", (const char *)msg,	\
		ntohs(hdr->sequence_id), pkt)

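/*
 * Handle messages that cannot wait for the gPTP thread to be scheduled.
 * Currently only Path Delay Requests are answered directly from the RX
 * path so that the turnaround time stays as short as possible; everything
 * else is left for gptp_handle_msg(). Returns true if the packet was
 * consumed here.
 */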
static bool gptp_handle_critical_msg(struct net_if *iface, struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);
	bool handled = false;
	int port;

	switch (hdr->message_type) {
	case GPTP_PATH_DELAY_REQ_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_REQ_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_REQ",
				 GPTP_PDELAY_REQ_LEN,
				 GPTP_PACKET_LEN(pkt));
			break;
		}

		PRINT_INFO("PDELAY_REQ", hdr, pkt);

		port = gptp_get_port_number(iface);
		if (port == -ENODEV) {
			NET_DBG("No port found for gPTP buffer");
			return handled;
		}

		if (GPTP_PORT_STATE(port)->pdelay_resp.state !=
						GPTP_PDELAY_RESP_NOT_ENABLED) {
			gptp_handle_pdelay_req(port, pkt);
		}

		handled = true;
		break;
	default:
		/* Not a critical message, this will be handled later. */
		break;
	}

	return handled;
}

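/*
 * Process one gPTP message taken from the RX queue by the gPTP thread.
 * The packet is dispatched on its message type: state-machine flags are
 * set and, where a later state machine still needs the data (SYNC,
 * FOLLOWUP, PDELAY_RESP and its follow-up, ANNOUNCE), an extra reference
 * is taken so the packet outlives the caller's unref.
 */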
static void gptp_handle_msg(struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);
	struct gptp_pdelay_req_state *pdelay_req_state;
	struct gptp_sync_rcv_state *sync_rcv_state;
	struct gptp_port_announce_receive_state *pa_rcv_state;
	struct gptp_port_bmca_data *bmca_data;
	int port;

	port = gptp_get_port_number(net_pkt_iface(pkt));
	if (port == -ENODEV) {
		NET_DBG("No port found for ptp buffer");
		return;
	}

	pdelay_req_state = &GPTP_PORT_STATE(port)->pdelay_req;
	sync_rcv_state = &GPTP_PORT_STATE(port)->sync_rcv;

	switch (hdr->message_type) {
	case GPTP_SYNC_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_SYNC_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "SYNC",
				 GPTP_SYNC_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("SYNC", hdr, pkt);

		sync_rcv_state->rcvd_sync = true;

		/* If we already have one, drop the previous one. */
		if (sync_rcv_state->rcvd_sync_ptr) {
			net_pkt_unref(sync_rcv_state->rcvd_sync_ptr);
		}

		/* Keep the buffer alive until follow_up is received. */
		net_pkt_ref(pkt);
		sync_rcv_state->rcvd_sync_ptr = pkt;

		GPTP_STATS_INC(port, rx_sync_count);
		break;

	case GPTP_DELAY_REQ_MESSAGE:
		NET_DBG("Delay Request not handled.");
		break;

	case GPTP_PATH_DELAY_REQ_MESSAGE:
		/*
		 * Path Delay Responses to Path Delay Requests need
		 * very low latency. They need to be handled with priority
		 * when received, as they cannot afford to be delayed
		 * by context switches.
		 */
		NET_WARN("Path Delay Request received as a normal message!");
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;

	case GPTP_PATH_DELAY_RESP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_RESP",
				 GPTP_PDELAY_RESP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("PDELAY_RESP", hdr, pkt);

		pdelay_req_state->rcvd_pdelay_resp++;

		/* If we already have one, drop the received one. */
		if (pdelay_req_state->rcvd_pdelay_resp_ptr) {
			break;
		}

		/* Keep the buffer alive until pdelay_rate_ratio is computed. */
		net_pkt_ref(pkt);
		pdelay_req_state->rcvd_pdelay_resp_ptr = pkt;
		break;

	case GPTP_FOLLOWUP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_FOLLOW_UP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "FOLLOWUP",
				 GPTP_FOLLOW_UP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("FOLLOWUP", hdr, pkt);

		sync_rcv_state->rcvd_follow_up = true;

		/* If we already have one, drop the previous one. */
		if (sync_rcv_state->rcvd_follow_up_ptr) {
			net_pkt_unref(sync_rcv_state->rcvd_follow_up_ptr);
		}

		/* Keep the pkt alive until info is extracted. */
		sync_rcv_state->rcvd_follow_up_ptr = net_pkt_ref(pkt);
		NET_DBG("Keeping %s seq %d pkt %p", "FOLLOWUP",
			ntohs(hdr->sequence_id), pkt);
		break;

	case GPTP_PATH_DELAY_FOLLOWUP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_FUP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_FOLLOWUP",
				 GPTP_PDELAY_RESP_FUP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("PDELAY_FOLLOWUP", hdr, pkt);

		pdelay_req_state->rcvd_pdelay_follow_up++;

		/* If we already have one, drop the received one. */
		if (pdelay_req_state->rcvd_pdelay_follow_up_ptr) {
			break;
		}

		/* Keep the buffer alive until pdelay_rate_ratio is computed. */
		net_pkt_ref(pkt);
		pdelay_req_state->rcvd_pdelay_follow_up_ptr = pkt;

		GPTP_STATS_INC(port, rx_pdelay_resp_fup_count);
		break;

	case GPTP_ANNOUNCE_MESSAGE:
		if (GPTP_ANNOUNCE_CHECK_LEN(pkt)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "ANNOUNCE",
				 GPTP_ANNOUNCE_LEN(pkt),
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("ANNOUNCE", hdr, pkt);

		pa_rcv_state = &GPTP_PORT_STATE(port)->pa_rcv;
		bmca_data = GPTP_PORT_BMCA_DATA(port);

		if (pa_rcv_state->rcvd_announce == false &&
				bmca_data->rcvd_announce_ptr == NULL) {
			pa_rcv_state->rcvd_announce = true;
			bmca_data->rcvd_announce_ptr = pkt;
			net_pkt_ref(pkt);
		}

		GPTP_STATS_INC(port, rx_announce_count);
		break;

	case GPTP_SIGNALING_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_SIGNALING_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "SIGNALING",
				 GPTP_SIGNALING_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("SIGNALING", hdr, pkt);

		gptp_handle_signaling(port, pkt);
		break;

	case GPTP_MANAGEMENT_MESSAGE:
		PRINT_INFO("MANAGEMENT", hdr, pkt);
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;

	default:
		NET_DBG("Received unknown message %x", hdr->message_type);
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;
	}
}

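/*
 * Entry point for received gPTP frames (called from the Ethernet L2
 * receive path). Frames that are not PTPv2 over 802.1AS are dropped.
 * Latency-critical messages are served immediately in
 * gptp_handle_critical_msg(); the rest are queued for the gPTP thread.
 */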
enum net_verdict net_gptp_recv(struct net_if *iface, struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);

	if ((hdr->ptp_version != GPTP_VERSION) ||
			(hdr->transport_specific != GPTP_TRANSPORT_802_1_AS)) {
		/* The stack only supports PTP V2 and transportSpecific set
		 * to 1 with IEEE802.1AS-2011.
		 */
		return NET_DROP;
	}

	/* Handle critical messages. */
	if (!gptp_handle_critical_msg(iface, pkt)) {
		k_fifo_put(&gptp_rx_queue, pkt);

		/* Returning OK here makes sure the network statistics are
		 * properly updated.
		 */
		return NET_OK;
	}

	/* Message not propagated up in the stack. */
	return NET_DROP;
}

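/*
 * Initialize the per-domain data sets (global, default, current, parent
 * and time properties) to their power-up defaults. Values that depend on
 * the grandmaster are filled with local placeholders until BMCA selects
 * one.
 */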
static void gptp_init_clock_ds(void)
{
	struct gptp_global_ds *global_ds;
	struct gptp_default_ds *default_ds;
	struct gptp_current_ds *current_ds;
	struct gptp_parent_ds *parent_ds;
	struct gptp_time_prop_ds *prop_ds;

	global_ds = GPTP_GLOBAL_DS();
	default_ds = GPTP_DEFAULT_DS();
	current_ds = GPTP_CURRENT_DS();
	parent_ds = GPTP_PARENT_DS();
	prop_ds = GPTP_PROPERTIES_DS();

	/* Initialize global data set. */
	(void)memset(global_ds, 0, sizeof(struct gptp_global_ds));

	/* Initialize default data set. */

	/* Compute the clock identity from the first port MAC address. */
	gptp_compute_clock_identity(GPTP_PORT_START);

	default_ds->gm_capable = IS_ENABLED(CONFIG_NET_GPTP_GM_CAPABLE);
	default_ds->clk_quality.clock_class = GPTP_CLASS_OTHER;
	default_ds->clk_quality.clock_accuracy =
		CONFIG_NET_GPTP_CLOCK_ACCURACY;
	default_ds->clk_quality.offset_scaled_log_var =
		GPTP_OFFSET_SCALED_LOG_VAR_UNKNOWN;

	if (default_ds->gm_capable) {
		/* The priority1 value cannot be 255 for a GM capable
		 * system.
		 */
		if (CONFIG_NET_GPTP_BMCA_PRIORITY1 ==
		    GPTP_PRIORITY1_NON_GM_CAPABLE) {
			default_ds->priority1 = GPTP_PRIORITY1_GM_CAPABLE;
		} else {
			default_ds->priority1 = CONFIG_NET_GPTP_BMCA_PRIORITY1;
		}
	} else {
		default_ds->priority1 = GPTP_PRIORITY1_NON_GM_CAPABLE;
	}

	default_ds->priority2 = GPTP_PRIORITY2_DEFAULT;

	default_ds->cur_utc_offset = 37U; /* Current leap seconds TAI - UTC */
	default_ds->flags.all = 0U;
	default_ds->flags.octets[1] = GPTP_FLAG_TIME_TRACEABLE;
	default_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR;

	/* Initialize current data set. */
	(void)memset(current_ds, 0, sizeof(struct gptp_current_ds));

	/* Initialize parent data set. */

	/* parent clock id is initialized to default_ds clock id. */
	memcpy(parent_ds->port_id.clk_id, default_ds->clk_id,
	       GPTP_CLOCK_ID_LEN);
	memcpy(parent_ds->gm_id, default_ds->clk_id, GPTP_CLOCK_ID_LEN);
	parent_ds->port_id.port_number = 0U;

	/* TODO: Check correct value for below field. */
	parent_ds->cumulative_rate_ratio = 0;

	parent_ds->gm_clk_quality.clock_class =
		default_ds->clk_quality.clock_class;
	parent_ds->gm_clk_quality.clock_accuracy =
		default_ds->clk_quality.clock_accuracy;
	parent_ds->gm_clk_quality.offset_scaled_log_var =
		default_ds->clk_quality.offset_scaled_log_var;
	parent_ds->gm_priority1 = default_ds->priority1;
	parent_ds->gm_priority2 = default_ds->priority2;

	/* Initialize properties data set. */

	/* TODO: Get accurate values for below. From the GM. */
	prop_ds->cur_utc_offset = 37U; /* Current leap seconds TAI - UTC */
	prop_ds->cur_utc_offset_valid = false;
	prop_ds->leap59 = false;
	prop_ds->leap61 = false;
	prop_ds->time_traceable = false;
	prop_ds->freq_traceable = false;
	prop_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR;

	/* Set system values. */
	global_ds->sys_flags.all = default_ds->flags.all;
	global_ds->sys_current_utc_offset = default_ds->cur_utc_offset;
	global_ds->sys_time_source = default_ds->time_source;
	global_ds->clk_master_sync_itv =
		NSEC_PER_SEC * GPTP_POW2(CONFIG_NET_GPTP_INIT_LOG_SYNC_ITV);
}

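/*
 * Initialize the per-port data set: port identity, message intervals,
 * timeouts and randomized initial sequence numbers. Called once per port
 * before the state machines are started.
 */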
static void gptp_init_port_ds(int port)
{
	struct gptp_default_ds *default_ds;
	struct gptp_port_ds *port_ds;

#if defined(CONFIG_NET_GPTP_STATISTICS)
	struct gptp_port_param_ds *port_param_ds;

	port_param_ds = GPTP_PORT_PARAM_DS(port);
#endif

	default_ds = GPTP_DEFAULT_DS();
	port_ds = GPTP_PORT_DS(port);

	/* Initialize port data set. */
	memcpy(port_ds->port_id.clk_id, default_ds->clk_id, GPTP_CLOCK_ID_LEN);
	port_ds->port_id.port_number = port;

	port_ds->ptt_port_enabled = true;
	port_ds->prev_ptt_port_enabled = true;

	port_ds->neighbor_prop_delay = 0;
	port_ds->neighbor_prop_delay_thresh = GPTP_NEIGHBOR_PROP_DELAY_THR;
	port_ds->delay_asymmetry = 0;

	port_ds->ini_log_announce_itv = CONFIG_NET_GPTP_INIT_LOG_ANNOUNCE_ITV;
	port_ds->cur_log_announce_itv = port_ds->ini_log_announce_itv;
	port_ds->announce_receipt_timeout =
		CONFIG_NET_GPTP_ANNOUNCE_RECEIPT_TIMEOUT;

	/* Subtract 1 from the log interval to halve the sync interval. */
	port_ds->ini_log_half_sync_itv = CONFIG_NET_GPTP_INIT_LOG_SYNC_ITV - 1;
	port_ds->cur_log_half_sync_itv = port_ds->ini_log_half_sync_itv;
	port_ds->sync_receipt_timeout = CONFIG_NET_GPTP_SYNC_RECEIPT_TIMEOUT;
	port_ds->sync_receipt_timeout_time_itv = 10000000U; /* 10ms */

	port_ds->ini_log_pdelay_req_itv =
		CONFIG_NET_GPTP_INIT_LOG_PDELAY_REQ_ITV;
	port_ds->cur_log_pdelay_req_itv = port_ds->ini_log_pdelay_req_itv;
	port_ds->allowed_lost_responses = GPTP_ALLOWED_LOST_RESP;
	port_ds->version = GPTP_VERSION;

	gptp_set_time_itv(&port_ds->pdelay_req_itv, 1,
			  port_ds->cur_log_pdelay_req_itv);

	gptp_set_time_itv(&port_ds->half_sync_itv, 1,
			  port_ds->cur_log_half_sync_itv);

	port_ds->compute_neighbor_rate_ratio = true;
	port_ds->compute_neighbor_prop_delay = true;

	/* Random Sequence Numbers. */
	port_ds->sync_seq_id = (uint16_t)sys_rand32_get();
	port_ds->pdelay_req_seq_id = (uint16_t)sys_rand32_get();
	port_ds->announce_seq_id = (uint16_t)sys_rand32_get();
	port_ds->signaling_seq_id = (uint16_t)sys_rand32_get();

#if defined(CONFIG_NET_GPTP_STATISTICS)
	/* Initialize stats data set. */
	(void)memset(port_param_ds, 0, sizeof(struct gptp_port_param_ds));
#endif
}

static void gptp_init_state_machine(void)
{
	gptp_md_init_state_machine();
	gptp_mi_init_state_machine();
}

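/*
 * Run one pass of the per-port and per-domain state machines. Ports whose
 * interface is down are forced to the DISABLED role so they do not take
 * part in synchronization until the link comes back.
 */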
static void gptp_state_machine(void)
{
	int port;

	/* Manage port states. */
	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		struct gptp_port_ds *port_ds = GPTP_PORT_DS(port);

		/* If interface is down, don't move forward */
		if (net_if_flag_is_set(GPTP_PORT_IFACE(port), NET_IF_UP)) {
			switch (GPTP_GLOBAL_DS()->selected_role[port]) {
			case GPTP_PORT_DISABLED:
			case GPTP_PORT_MASTER:
			case GPTP_PORT_PASSIVE:
			case GPTP_PORT_SLAVE:
				gptp_md_state_machines(port);
				gptp_mi_port_sync_state_machines(port);
				gptp_mi_port_bmca_state_machines(port);
				break;
			default:
				NET_DBG("%s: Unknown port state", __func__);
				break;
			}
		} else {
			GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_DISABLED;
		}

		port_ds->prev_ptt_port_enabled = port_ds->ptt_port_enabled;
	}

	gptp_mi_state_machines();
}

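/*
 * Main gPTP thread. It initializes the data sets, then loops forever:
 * queued messages are handled as they arrive and the state machines are
 * advanced at least every GPTP_THREAD_WAIT_TIMEOUT_MS milliseconds.
 */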
static void gptp_thread(void)
{
	int port;

	NET_DBG("Starting PTP thread");

	gptp_init_clock_ds();

	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		gptp_init_port_ds(port);
		gptp_change_port_state(port, GPTP_PORT_DISABLED);
	}

	while (1) {
		struct net_pkt *pkt;

		pkt = k_fifo_get(&gptp_rx_queue,
				 K_MSEC(GPTP_THREAD_WAIT_TIMEOUT_MS));
		if (pkt) {
			gptp_handle_msg(pkt);
			net_pkt_unref(pkt);
		}

		gptp_state_machine();
	}
}

static void gptp_add_port(struct net_if *iface, void *user_data)
{
	int *num_ports = user_data;
	const struct device *clk;

	if (*num_ports >= CONFIG_NET_GPTP_NUM_PORTS) {
		return;
	}

#if defined(CONFIG_NET_GPTP_VLAN)
	if (CONFIG_NET_GPTP_VLAN_TAG >= 0 &&
	    CONFIG_NET_GPTP_VLAN_TAG < NET_VLAN_TAG_UNSPEC) {
		struct net_if *vlan_iface;

		vlan_iface = net_eth_get_vlan_iface(iface,
						    CONFIG_NET_GPTP_VLAN_TAG);
		if (vlan_iface != iface) {
			return;
		}
	}
#endif /* CONFIG_NET_GPTP_VLAN */

	/* Check if interface has a PTP clock. */
	clk = net_eth_get_ptp_clock(iface);
	if (clk) {
		gptp_domain.iface[*num_ports] = iface;
		net_eth_set_ptp_port(iface, *num_ports);
		(*num_ports)++;
	}
}

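/*
 * Compute an interval in uscaled nanoseconds, i.e. seconds * 10^9 * 2^16
 * scaled by 2^log_msg_interval, split over the 64-bit 'low' and 32-bit
 * 'high' parts of struct gptp_uscaled_ns. Out-of-range exponents saturate
 * to zero or to the maximum representable value.
 *
 * Example: seconds = 1 and log_msg_interval = 0 gives
 * low = 10^9 << 16 (one second in uscaled ns) and high = 0, which
 * gptp_uscaled_ns_to_timer_ms() later converts back to 1000 ms.
 */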
void gptp_set_time_itv(struct gptp_uscaled_ns *interval,
		       uint16_t seconds,
		       int8_t log_msg_interval)
{
	int i;

	if (seconds == 0U) {
		interval->low = 0U;
		interval->high = 0U;
		return;
	} else if (log_msg_interval >= 96) {
		/* Overflow, set maximum. */
		interval->low = UINT64_MAX;
		interval->high = UINT32_MAX;

		return;
	} else if (log_msg_interval <= -64) {
		/* Underflow, set to 0. */
		interval->low = 0U;
		interval->high = 0U;
		return;
	}


	/* NSEC_PER_SEC is less than 2^30 and seconds is less than 2^16,
	 * thus the computation will be less than 2^63.
	 */
	interval->low = (seconds * (uint64_t)NSEC_PER_SEC) << 16;

	if (log_msg_interval <= 0) {
		interval->low >>= -log_msg_interval;
		interval->high = 0U;
	} else {
		/* Find highest bit set. */
		for (i = 63; i >= 0; i--) {
			if (interval->low >> i) {
				break;
			}
		}

		if ((i + log_msg_interval) >= 96 || log_msg_interval > 64) {
			/* Overflow, set maximum. */
			interval->low = UINT64_MAX;
			interval->high = UINT32_MAX;
		} else {
			interval->high =
				interval->low >> (64 - log_msg_interval);

			/* << operator is undefined if the shift value is equal
			 * to the number of bits in the left expression's type.
			 */
			if (log_msg_interval == 64) {
				interval->low = 0U;
			} else {
				interval->low <<= log_msg_interval;
			}
		}
	}
}

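/*
 * Convert an uscaled ns interval to a kernel timer period in milliseconds:
 * drop the 2^16 scaling, then divide the nanoseconds by 10^6 (which is why
 * USEC_PER_SEC is used as the divisor). The result is clamped to the range
 * 1..INT32_MAX so a timer is never started with a zero period.
 */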
int32_t gptp_uscaled_ns_to_timer_ms(struct gptp_uscaled_ns *usns)
{
	uint64_t tmp;

	if (usns->high) {
		/* Do not calculate, it reaches max value. */
		return INT32_MAX;
	}

	tmp = (usns->low >> 16) / USEC_PER_SEC;
	if (tmp == 0U) {
		/* Timer must be started with a minimum value of 1. */
		return 1;
	}

	if (tmp > INT32_MAX) {
		return INT32_MAX;
	}

	return (tmp & INT32_MAX);
}

static int32_t timer_get_remaining_and_stop(struct k_timer *timer)
{
	unsigned int key;
	int32_t timer_value;

	key = irq_lock();
	timer_value = k_timer_remaining_get(timer);

	/* Stop timer as the period is about to be modified. */
	k_timer_stop(timer);
	irq_unlock(key);

	return timer_value;
}

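/*
 * Update a message interval from a signaling request: GPTP_ITV_KEEP leaves
 * it untouched, GPTP_ITV_SET_TO_INIT restores the initial value, and any
 * other value is taken as the new log interval (plus a correction used for
 * the half-sync interval). Returns the resulting timer period in ms.
 */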
static int32_t update_itv(struct gptp_uscaled_ns *itv,
			 int8_t *cur_log_itv,
			 int8_t *ini_log_itv,
			 int8_t new_log_itv,
			 int8_t correction_log_itv)
{
	switch (new_log_itv) {
	case GPTP_ITV_KEEP:
		break;
	case GPTP_ITV_SET_TO_INIT:
		*cur_log_itv = *ini_log_itv;
		gptp_set_time_itv(itv, 1, *ini_log_itv);
		break;
	case GPTP_ITV_STOP:
	default:
		*cur_log_itv = new_log_itv + correction_log_itv;
		gptp_set_time_itv(itv, 1, *cur_log_itv);
		break;
	}

	return gptp_uscaled_ns_to_timer_ms(itv);
}

void gptp_update_pdelay_req_interval(int port, int8_t log_val)
{
	int32_t remaining;
	int32_t new_itv, old_itv;
	struct gptp_pdelay_req_state *state_pdelay;
	struct gptp_port_ds *port_ds;

	port_ds = GPTP_PORT_DS(port);
	state_pdelay = &GPTP_PORT_STATE(port)->pdelay_req;
	remaining = timer_get_remaining_and_stop(&state_pdelay->pdelay_timer);

	old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->pdelay_req_itv);
	new_itv = update_itv(&port_ds->pdelay_req_itv,
			     &port_ds->cur_log_pdelay_req_itv,
			     &port_ds->ini_log_pdelay_req_itv,
			     log_val,
			     0);

	new_itv -= (old_itv - remaining);
	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_pdelay->pdelay_timer, K_MSEC(new_itv), K_NO_WAIT);
}

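/*
 * Apply a new sync interval to a port. The timer works on half sync
 * intervals, so the time already spent is reconstructed from the old
 * period and the expired flags before the timer is restarted with the new
 * period.
 */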
void gptp_update_sync_interval(int port, int8_t log_val)
{
	struct gptp_pss_send_state *state_pss_send;
	struct gptp_port_ds *port_ds;
	int32_t new_itv, old_itv, period;
	int32_t remaining;
	uint32_t time_spent;

	port_ds = GPTP_PORT_DS(port);
	state_pss_send = &GPTP_PORT_STATE(port)->pss_send;
	remaining =
		timer_get_remaining_and_stop(
				&state_pss_send->half_sync_itv_timer);
	old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->half_sync_itv);
	new_itv = update_itv(&port_ds->half_sync_itv,
			     &port_ds->cur_log_half_sync_itv,
			     &port_ds->ini_log_half_sync_itv,
			     log_val,
			     -1);
	period = new_itv;

	/* Get the time spent from the start of the timer. */
	time_spent = old_itv;
	if (state_pss_send->half_sync_itv_timer_expired) {
		time_spent *= 2U;
	}
	time_spent -= remaining;

	/* Calculate remaining time and if half timer has expired. */
	if ((time_spent / 2U) > new_itv) {
		state_pss_send->sync_itv_timer_expired = true;
		state_pss_send->half_sync_itv_timer_expired = true;
		new_itv = 1;
	} else if (time_spent > new_itv) {
		state_pss_send->sync_itv_timer_expired = false;
		state_pss_send->half_sync_itv_timer_expired = true;
		new_itv -= (time_spent - new_itv);
	} else {
		state_pss_send->sync_itv_timer_expired = false;
		state_pss_send->half_sync_itv_timer_expired = false;
		new_itv -= time_spent;
	}

	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_pss_send->half_sync_itv_timer, K_MSEC(new_itv),
		      K_MSEC(period));
}

void gptp_update_announce_interval(int port, int8_t log_val)
{
	int32_t remaining;
	int32_t new_itv, old_itv;
	struct gptp_port_announce_transmit_state *state_ann;
	struct gptp_port_bmca_data *bmca_data;
	struct gptp_port_ds *port_ds;

	port_ds = GPTP_PORT_DS(port);
	state_ann = &GPTP_PORT_STATE(port)->pa_transmit;
	bmca_data = GPTP_PORT_BMCA_DATA(port);
	remaining = timer_get_remaining_and_stop(
			&state_ann->ann_send_periodic_timer);

	old_itv = gptp_uscaled_ns_to_timer_ms(&bmca_data->announce_interval);
	new_itv = update_itv(&bmca_data->announce_interval,
			     &port_ds->cur_log_announce_itv,
			     &port_ds->ini_log_announce_itv,
			     log_val,
			     0);

	new_itv -= (old_itv - remaining);
	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_ann->ann_send_periodic_timer, K_MSEC(new_itv),
		      K_NO_WAIT);
}

struct port_user_data {
	gptp_port_cb_t cb;
	void *user_data;
};

static void gptp_get_port(struct net_if *iface, void *user_data)
{
	struct port_user_data *ud = user_data;
	const struct device *clk;

	/* Check if interface has a PTP clock. */
	clk = net_eth_get_ptp_clock(iface);
	if (clk) {
		int port = gptp_get_port_number(iface);

		if (port < 0) {
			return;
		}

		ud->cb(port, iface, ud->user_data);
	}
}

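/*
 * Iterate over all gPTP-capable interfaces and invoke the callback with
 * the port number, interface and user data. A minimal (hypothetical)
 * usage sketch from application code:
 *
 *	static void port_cb(int port, struct net_if *iface, void *user_data)
 *	{
 *		printk("gPTP port %d uses iface %p\n", port, iface);
 *	}
 *
 *	gptp_foreach_port(port_cb, NULL);
 */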
void gptp_foreach_port(gptp_port_cb_t cb, void *user_data)
{
	struct port_user_data ud = {
		.cb = cb,
		.user_data = user_data
	};

	net_if_foreach(gptp_get_port, &ud);
}

struct gptp_domain *gptp_get_domain(void)
{
	return &gptp_domain;
}

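/*
 * Fetch pointers to the internal data structures of a given port. Any of
 * the output parameters may be NULL if the caller is not interested in it;
 * the statistics pointer is only set with CONFIG_NET_GPTP_STATISTICS.
 */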
int gptp_get_port_data(struct gptp_domain *domain,
		       int port,
		       struct gptp_port_ds **port_ds,
		       struct gptp_port_param_ds **port_param_ds,
		       struct gptp_port_states **port_state,
		       struct gptp_port_bmca_data **port_bmca_data,
		       struct net_if **iface)
{
	if (domain != &gptp_domain) {
		return -ENOENT;
	}

	if (port < GPTP_PORT_START || port >= GPTP_PORT_END) {
		return -EINVAL;
	}

	if (port_ds) {
		*port_ds = GPTP_PORT_DS(port);
	}

	if (port_param_ds) {
#if defined(CONFIG_NET_GPTP_STATISTICS)
		*port_param_ds = GPTP_PORT_PARAM_DS(port);
#else
		*port_param_ds = NULL;
#endif
	}

	if (port_state) {
		*port_state = GPTP_PORT_STATE(port);
	}

	if (port_bmca_data) {
		*port_bmca_data = GPTP_PORT_BMCA_DATA(port);
	}

	if (iface) {
		*iface = GPTP_PORT_IFACE(port);
	}

	return 0;
}

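/*
 * Register every gPTP-capable network interface as a port, initialize the
 * state machines and start the gPTP thread.
 */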
static void init_ports(void)
{
	net_if_foreach(gptp_add_port, &gptp_domain.default_ds.nb_ports);

	/* Only initialize the state machine once the ports are known. */
	gptp_init_state_machine();

	tid = k_thread_create(&gptp_thread_data, gptp_stack,
			      K_KERNEL_STACK_SIZEOF(gptp_stack),
			      (k_thread_entry_t)gptp_thread,
			      NULL, NULL, NULL, K_PRIO_COOP(5), 0, K_NO_WAIT);
	k_thread_name_set(&gptp_thread_data, "gptp");
}

#if defined(CONFIG_NET_GPTP_VLAN)
static struct net_mgmt_event_callback vlan_cb;

struct vlan_work {
	struct k_work work;
	struct net_if *iface;
} vlan;

static void disable_port(int port)
{
	GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_DISABLED;

	gptp_state_machine();
}

static void vlan_enabled(struct k_work *work)
{
	struct vlan_work *vlan = CONTAINER_OF(work,
					      struct vlan_work,
					      work);
	if (tid) {
		int port;

		port = gptp_get_port_number(vlan->iface);
		if (port < 0) {
			NET_DBG("No port found for iface %p", vlan->iface);
			return;
		}

		GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_SLAVE;

		gptp_state_machine();
	} else {
		init_ports();
	}
}

static void vlan_disabled(struct k_work *work)
{
	struct vlan_work *vlan = CONTAINER_OF(work,
					      struct vlan_work,
					      work);
	int port;

	port = gptp_get_port_number(vlan->iface);
	if (port < 0) {
		NET_DBG("No port found for iface %p", vlan->iface);
		return;
	}

	disable_port(port);
}

static void vlan_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event,
			       struct net_if *iface)
{
	uint16_t tag;

	if (mgmt_event != NET_EVENT_ETHERNET_VLAN_TAG_ENABLED &&
	    mgmt_event != NET_EVENT_ETHERNET_VLAN_TAG_DISABLED) {
		return;
	}

#if defined(CONFIG_NET_MGMT_EVENT_INFO)
	if (!cb->info) {
		return;
	}

	tag = *((uint16_t *)cb->info);
	if (tag != CONFIG_NET_GPTP_VLAN_TAG) {
		return;
	}

	vlan.iface = iface;

	if (mgmt_event == NET_EVENT_ETHERNET_VLAN_TAG_ENABLED) {
		/* We found the right tag, now start gPTP for this interface */
		k_work_init(&vlan.work, vlan_enabled);

		NET_DBG("VLAN tag %d %s for iface %p", tag, "enabled", iface);
	} else {
		k_work_init(&vlan.work, vlan_disabled);

		NET_DBG("VLAN tag %d %s for iface %p", tag, "disabled", iface);
	}

	k_work_submit(&vlan.work);
#else
	NET_WARN("VLAN event but tag info missing!");

	ARG_UNUSED(tag);
#endif
}

static void setup_vlan_events_listener(void)
{
	net_mgmt_init_event_callback(&vlan_cb, vlan_event_handler,
				     NET_EVENT_ETHERNET_VLAN_TAG_ENABLED |
				     NET_EVENT_ETHERNET_VLAN_TAG_DISABLED);
	net_mgmt_add_event_callback(&vlan_cb);
}
#endif /* CONFIG_NET_GPTP_VLAN */

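/*
 * Entry point of the gPTP subsystem, called during network stack
 * initialization. Without VLAN support the ports are set up immediately;
 * with a valid CONFIG_NET_GPTP_VLAN_TAG the setup is deferred until the
 * matching VLAN tag is enabled on an interface.
 */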
void net_gptp_init(void)
{
	gptp_domain.default_ds.nb_ports = 0U;

#if defined(CONFIG_NET_GPTP_VLAN)
	/* If the user has enabled gPTP over VLAN support, then gPTP is
	 * started only after the matching "VLAN tag enabled" event has
	 * been received.
	 */
	if (CONFIG_NET_GPTP_VLAN_TAG >= 0 &&
	    CONFIG_NET_GPTP_VLAN_TAG < NET_VLAN_TAG_UNSPEC) {
		setup_vlan_events_listener();
	} else {
		NET_WARN("VLAN tag %d set but the value is not valid.",
			 CONFIG_NET_GPTP_VLAN_TAG);

		init_ports();
	}
#else
	init_ports();
#endif
}