/*
 * Copyright (c) 2017 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_gptp, CONFIG_NET_GPTP_LOG_LEVEL);

#include <zephyr/net/net_pkt.h>
#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/net/ethernet_mgmt.h>
#include <zephyr/random/random.h>

#include <zephyr/net/gptp.h>

#include "gptp_messages.h"
#include "gptp_mi.h"
#include "gptp_data_set.h"

#include "gptp_private.h"

#if CONFIG_NET_GPTP_NUM_PORTS > 32
/*
 * Boolean array sizes have been hardcoded.
 * It has been arbitrarily chosen that a system cannot
 * have more than 32 ports.
 */
#error Maximum number of ports exceeded. (Max is 32).
#endif

K_KERNEL_STACK_DEFINE(gptp_stack, CONFIG_NET_GPTP_STACK_SIZE);
K_FIFO_DEFINE(gptp_rx_queue);

static k_tid_t tid;
static struct k_thread gptp_thread_data;
struct gptp_domain gptp_domain;

int gptp_get_port_number(struct net_if *iface)
{
	int port = net_eth_get_ptp_port(iface) + 1;

	if (port >= GPTP_PORT_START && port < GPTP_PORT_END) {
		return port;
	}

	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		if (GPTP_PORT_IFACE(port) == iface) {
			return port;
		}
	}

	return -ENODEV;
}

bool gptp_is_slave_port(int port)
{
	return (GPTP_GLOBAL_DS()->selected_role[port] == GPTP_PORT_SLAVE);
}

/*
 * Use the given port to generate the clock identity
 * for the device.
 * The clock identity is unique for one time-aware system.
 */
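/*
 * Note: the mapping below follows the usual EUI-48 to EUI-64 style
 * derivation used by IEEE 802.1AS: the 6-byte interface MAC address is
 * split in the middle and the bytes 0xFF, 0xFE are inserted to form the
 * 8-byte clock identity.
 */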
static void gptp_compute_clock_identity(int port)
{
	struct net_if *iface = GPTP_PORT_IFACE(port);
	struct gptp_default_ds *default_ds;

	default_ds = GPTP_DEFAULT_DS();

	if (iface) {
		default_ds->clk_id[0] = net_if_get_link_addr(iface)->addr[0];
		default_ds->clk_id[1] = net_if_get_link_addr(iface)->addr[1];
		default_ds->clk_id[2] = net_if_get_link_addr(iface)->addr[2];
		default_ds->clk_id[3] = 0xFF;
		default_ds->clk_id[4] = 0xFE;
		default_ds->clk_id[5] = net_if_get_link_addr(iface)->addr[3];
		default_ds->clk_id[6] = net_if_get_link_addr(iface)->addr[4];
		default_ds->clk_id[7] = net_if_get_link_addr(iface)->addr[5];
	}
}

#define PRINT_INFO(msg, hdr, pkt)				\
	NET_DBG("Received %s seq %d pkt %p", (const char *)msg,	\
		ntohs(hdr->sequence_id), pkt)			\


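/*
 * Handle messages that must be answered with minimal latency directly
 * from the RX path. Currently only Path Delay Requests are treated as
 * critical; everything else is deferred to the gPTP thread. Returns true
 * if the message was handled here and should not be queued.
 */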
static bool gptp_handle_critical_msg(struct net_if *iface, struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);
	bool handled = false;
	int port;

	switch (hdr->message_type) {
	case GPTP_PATH_DELAY_REQ_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_REQ_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_REQ",
				 GPTP_PDELAY_REQ_LEN,
				 GPTP_PACKET_LEN(pkt));
			break;
		}

		PRINT_INFO("PDELAY_REQ", hdr, pkt);

		port = gptp_get_port_number(iface);
		if (port == -ENODEV) {
			NET_DBG("No port found for gPTP buffer");
			return handled;
		}

		if (GPTP_PORT_STATE(port)->pdelay_resp.state !=
						GPTP_PDELAY_RESP_NOT_ENABLED) {
			gptp_handle_pdelay_req(port, pkt);
		}

		handled = true;
		break;
	default:
		/* Not a critical message, this will be handled later. */
		break;
	}

	return handled;
}

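/*
 * Process one queued gPTP message in the gPTP thread context: dispatch on
 * the message type, update the relevant per-port state machine flags and,
 * where a later state machine pass still needs the payload (SYNC,
 * FOLLOW_UP, PDELAY_RESP, PDELAY_FOLLOWUP, ANNOUNCE), take an extra
 * reference on the packet so it survives the caller's unref.
 */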
static void gptp_handle_msg(struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);
	struct gptp_pdelay_req_state *pdelay_req_state;
	struct gptp_sync_rcv_state *sync_rcv_state;
	struct gptp_port_announce_receive_state *pa_rcv_state;
	struct gptp_port_bmca_data *bmca_data;
	int port;

	port = gptp_get_port_number(net_pkt_iface(pkt));
	if (port == -ENODEV) {
		NET_DBG("No port found for ptp buffer");
		return;
	}

	pdelay_req_state = &GPTP_PORT_STATE(port)->pdelay_req;
	sync_rcv_state = &GPTP_PORT_STATE(port)->sync_rcv;

	switch (hdr->message_type) {
	case GPTP_SYNC_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_SYNC_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "SYNC",
				 GPTP_SYNC_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("SYNC", hdr, pkt);

		sync_rcv_state->rcvd_sync = true;

		/* If we already have one, drop the previous one. */
		if (sync_rcv_state->rcvd_sync_ptr) {
			net_pkt_unref(sync_rcv_state->rcvd_sync_ptr);
		}

		/* Keep the buffer alive until follow_up is received. */
		net_pkt_ref(pkt);
		sync_rcv_state->rcvd_sync_ptr = pkt;

		GPTP_STATS_INC(port, rx_sync_count);
		break;

	case GPTP_DELAY_REQ_MESSAGE:
		NET_DBG("Delay Request not handled.");
		break;

	case GPTP_PATH_DELAY_REQ_MESSAGE:
		/*
		 * Path Delay Responses to Path Delay Requests need
		 * very low latency. They need to be handled with
		 * priority when received, as they cannot afford to
		 * be delayed by context switches.
		 */
		NET_WARN("Path Delay Request received as a normal message!");
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;

	case GPTP_PATH_DELAY_RESP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_RESP",
				 GPTP_PDELAY_RESP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("PDELAY_RESP", hdr, pkt);

		pdelay_req_state->rcvd_pdelay_resp++;

		/* If we already have one, drop the received one. */
		if (pdelay_req_state->rcvd_pdelay_resp_ptr) {
			break;
		}

		/* Keep the buffer alive until pdelay_rate_ratio is computed. */
		net_pkt_ref(pkt);
		pdelay_req_state->rcvd_pdelay_resp_ptr = pkt;
		break;

	case GPTP_FOLLOWUP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_FOLLOW_UP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "FOLLOWUP",
				 GPTP_FOLLOW_UP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("FOLLOWUP", hdr, pkt);

		sync_rcv_state->rcvd_follow_up = true;

		/* If we already have one, drop the previous one. */
		if (sync_rcv_state->rcvd_follow_up_ptr) {
			net_pkt_unref(sync_rcv_state->rcvd_follow_up_ptr);
		}

		/* Keep the pkt alive until info is extracted. */
		sync_rcv_state->rcvd_follow_up_ptr = net_pkt_ref(pkt);
		NET_DBG("Keeping %s seq %d pkt %p", "FOLLOWUP",
			ntohs(hdr->sequence_id), pkt);
		break;

	case GPTP_PATH_DELAY_FOLLOWUP_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_PDELAY_RESP_FUP_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "PDELAY_FOLLOWUP",
				 GPTP_PDELAY_RESP_FUP_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("PDELAY_FOLLOWUP", hdr, pkt);

		pdelay_req_state->rcvd_pdelay_follow_up++;

		/* If we already have one, drop the received one. */
		if (pdelay_req_state->rcvd_pdelay_follow_up_ptr) {
			break;
		}

		/* Keep the buffer alive until pdelay_rate_ratio is computed. */
		net_pkt_ref(pkt);
		pdelay_req_state->rcvd_pdelay_follow_up_ptr = pkt;

		GPTP_STATS_INC(port, rx_pdelay_resp_fup_count);
		break;

	case GPTP_ANNOUNCE_MESSAGE:
		if (GPTP_ANNOUNCE_CHECK_LEN(pkt)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "ANNOUNCE",
				 GPTP_ANNOUNCE_LEN(pkt),
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("ANNOUNCE", hdr, pkt);

		pa_rcv_state = &GPTP_PORT_STATE(port)->pa_rcv;
		bmca_data = GPTP_PORT_BMCA_DATA(port);

		if (pa_rcv_state->rcvd_announce == false &&
				bmca_data->rcvd_announce_ptr == NULL) {
			pa_rcv_state->rcvd_announce = true;
			bmca_data->rcvd_announce_ptr = pkt;
			net_pkt_ref(pkt);
		}

		GPTP_STATS_INC(port, rx_announce_count);
		break;

	case GPTP_SIGNALING_MESSAGE:
		if (GPTP_CHECK_LEN(pkt, GPTP_SIGNALING_LEN)) {
			NET_WARN("Invalid length for %s packet "
				 "should have %zd bytes but has %zd bytes",
				 "SIGNALING",
				 GPTP_SIGNALING_LEN,
				 GPTP_PACKET_LEN(pkt));
			GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
			break;
		}

		PRINT_INFO("SIGNALING", hdr, pkt);

		gptp_handle_signaling(port, pkt);
		break;

	case GPTP_MANAGEMENT_MESSAGE:
		PRINT_INFO("MANAGEMENT", hdr, pkt);
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;

	default:
		NET_DBG("Received unknown message %x", hdr->message_type);
		GPTP_STATS_INC(port, rx_ptp_packet_discard_count);
		break;
	}
}

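/*
 * RX entry point called for incoming gPTP frames: frames that are not
 * PTPv2 / 802.1AS are dropped, latency-critical messages are answered
 * inline and everything else is queued for the gPTP thread.
 */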
enum net_verdict net_gptp_recv(struct net_if *iface, struct net_pkt *pkt)
{
	struct gptp_hdr *hdr = GPTP_HDR(pkt);

	if ((hdr->ptp_version != GPTP_VERSION) ||
			(hdr->transport_specific != GPTP_TRANSPORT_802_1_AS)) {
		/* The stack only supports PTP V2 and transportSpecific set
		 * to 1 with IEEE802.1AS-2011.
		 */
		return NET_DROP;
	}

	/* Handle critical messages. */
	if (!gptp_handle_critical_msg(iface, pkt)) {
		k_fifo_put(&gptp_rx_queue, pkt);

		/* Returning OK here makes sure the network statistics are
		 * properly updated.
		 */
		return NET_OK;
	}

	/* Message not propagated up in the stack. */
	return NET_DROP;
}

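/*
 * Initialize the per-domain data sets (global, default, current, parent
 * and time properties) with configured or standard default values. Fields
 * that can only be learned from the grandmaster at runtime are left at
 * conservative defaults here.
 */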
static void gptp_init_clock_ds(void)
{
	struct gptp_global_ds *global_ds;
	struct gptp_default_ds *default_ds;
	struct gptp_current_ds *current_ds;
	struct gptp_parent_ds *parent_ds;
	struct gptp_time_prop_ds *prop_ds;

	global_ds = GPTP_GLOBAL_DS();
	default_ds = GPTP_DEFAULT_DS();
	current_ds = GPTP_CURRENT_DS();
	parent_ds = GPTP_PARENT_DS();
	prop_ds = GPTP_PROPERTIES_DS();

	/* Initialize global data set. */
	(void)memset(global_ds, 0, sizeof(struct gptp_global_ds));

	/* Initialize default data set. */

	/* Compute the clock identity from the first port MAC address. */
	gptp_compute_clock_identity(GPTP_PORT_START);

	default_ds->gm_capable = IS_ENABLED(CONFIG_NET_GPTP_GM_CAPABLE);
	default_ds->clk_quality.clock_class = GPTP_CLASS_OTHER;
	default_ds->clk_quality.clock_accuracy =
		CONFIG_NET_GPTP_CLOCK_ACCURACY;
	default_ds->clk_quality.offset_scaled_log_var =
		GPTP_OFFSET_SCALED_LOG_VAR_UNKNOWN;

	if (default_ds->gm_capable) {
		/* The priority1 value cannot be 255 for GM capable
		 * system.
		 */
		if (CONFIG_NET_GPTP_BMCA_PRIORITY1 ==
		    GPTP_PRIORITY1_NON_GM_CAPABLE) {
			default_ds->priority1 = GPTP_PRIORITY1_GM_CAPABLE;
		} else {
			default_ds->priority1 = CONFIG_NET_GPTP_BMCA_PRIORITY1;
		}
	} else {
		default_ds->priority1 = GPTP_PRIORITY1_NON_GM_CAPABLE;
	}

	default_ds->priority2 = GPTP_PRIORITY2_DEFAULT;

	default_ds->cur_utc_offset = 37U; /* Current leap seconds TAI - UTC */
	default_ds->flags.all = 0U;
	default_ds->flags.octets[1] = GPTP_FLAG_TIME_TRACEABLE;
	default_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR;

	/* Initialize current data set. */
	(void)memset(current_ds, 0, sizeof(struct gptp_current_ds));

	/* Initialize parent data set. */

	/* parent clock id is initialized to default_ds clock id. */
	memcpy(parent_ds->port_id.clk_id, default_ds->clk_id,
	       GPTP_CLOCK_ID_LEN);
	memcpy(parent_ds->gm_id, default_ds->clk_id, GPTP_CLOCK_ID_LEN);
	parent_ds->port_id.port_number = 0U;

	/* TODO: Check correct value for below field. */
	parent_ds->cumulative_rate_ratio = 0;

	parent_ds->gm_clk_quality.clock_class =
		default_ds->clk_quality.clock_class;
	parent_ds->gm_clk_quality.clock_accuracy =
		default_ds->clk_quality.clock_accuracy;
	parent_ds->gm_clk_quality.offset_scaled_log_var =
		default_ds->clk_quality.offset_scaled_log_var;
	parent_ds->gm_priority1 = default_ds->priority1;
	parent_ds->gm_priority2 = default_ds->priority2;

	/* Initialize properties data set. */

	/* TODO: Get accurate values for below. From the GM. */
	prop_ds->cur_utc_offset = 37U; /* Current leap seconds TAI - UTC */
	prop_ds->cur_utc_offset_valid = false;
	prop_ds->leap59 = false;
	prop_ds->leap61 = false;
	prop_ds->time_traceable = false;
	prop_ds->freq_traceable = false;
	prop_ds->time_source = GPTP_TS_INTERNAL_OSCILLATOR;

	/* Set system values. */
	global_ds->sys_flags.all = default_ds->flags.all;
	global_ds->sys_current_utc_offset = default_ds->cur_utc_offset;
	global_ds->sys_time_source = default_ds->time_source;
	global_ds->clk_master_sync_itv =
		NSEC_PER_SEC * GPTP_POW2(CONFIG_NET_GPTP_INIT_LOG_SYNC_ITV);
}

static void gptp_init_port_ds(int port)
{
	struct gptp_default_ds *default_ds;
	struct gptp_port_ds *port_ds;

#if defined(CONFIG_NET_GPTP_STATISTICS)
	struct gptp_port_param_ds *port_param_ds;

	port_param_ds = GPTP_PORT_PARAM_DS(port);
#endif

	default_ds = GPTP_DEFAULT_DS();
	port_ds = GPTP_PORT_DS(port);

	/* Initialize port data set. */
	memcpy(port_ds->port_id.clk_id, default_ds->clk_id, GPTP_CLOCK_ID_LEN);
	port_ds->port_id.port_number = port;

	port_ds->ptt_port_enabled = true;
	port_ds->prev_ptt_port_enabled = true;

	port_ds->neighbor_prop_delay = 0;
	port_ds->neighbor_prop_delay_thresh = GPTP_NEIGHBOR_PROP_DELAY_THR;
	port_ds->delay_asymmetry = 0;

	port_ds->ini_log_announce_itv = CONFIG_NET_GPTP_INIT_LOG_ANNOUNCE_ITV;
	port_ds->cur_log_announce_itv = port_ds->ini_log_announce_itv;
	port_ds->announce_receipt_timeout =
		CONFIG_NET_GPTP_ANNOUNCE_RECEIPT_TIMEOUT;

	/* Subtract 1 to halve the sync interval. */
	port_ds->ini_log_half_sync_itv = CONFIG_NET_GPTP_INIT_LOG_SYNC_ITV - 1;
	port_ds->cur_log_half_sync_itv = port_ds->ini_log_half_sync_itv;
	port_ds->sync_receipt_timeout = CONFIG_NET_GPTP_SYNC_RECEIPT_TIMEOUT;
	port_ds->sync_receipt_timeout_time_itv = 10000000U; /* 10ms */

	port_ds->ini_log_pdelay_req_itv =
		CONFIG_NET_GPTP_INIT_LOG_PDELAY_REQ_ITV;
	port_ds->cur_log_pdelay_req_itv = port_ds->ini_log_pdelay_req_itv;
	port_ds->allowed_lost_responses = GPTP_ALLOWED_LOST_RESP;
	port_ds->version = GPTP_VERSION;

	gptp_set_time_itv(&port_ds->pdelay_req_itv, 1,
			  port_ds->cur_log_pdelay_req_itv);

	gptp_set_time_itv(&port_ds->half_sync_itv, 1,
			  port_ds->cur_log_half_sync_itv);

	port_ds->compute_neighbor_rate_ratio = true;
	port_ds->compute_neighbor_prop_delay = true;

	/* Random Sequence Numbers. */
	port_ds->sync_seq_id = (uint16_t)sys_rand32_get();
	port_ds->pdelay_req_seq_id = (uint16_t)sys_rand32_get();
	port_ds->announce_seq_id = (uint16_t)sys_rand32_get();
	port_ds->signaling_seq_id = (uint16_t)sys_rand32_get();

#if defined(CONFIG_NET_GPTP_STATISTICS)
	/* Initialize stats data set. */
	(void)memset(port_param_ds, 0, sizeof(struct gptp_port_param_ds));
#endif
}

static void gptp_init_state_machine(void)
{
	gptp_md_init_state_machine();
	gptp_mi_init_state_machine();
}

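/*
 * Run one pass of all state machines: for each port whose interface is up,
 * execute the media dependent (MD), port sync and BMCA state machines,
 * then run the media independent (MI) site-wide state machines. Ports
 * whose interface is down are forced to the DISABLED role.
 */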
static void gptp_state_machine(void)
{
	int port;

	/* Manage port states. */
	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		struct gptp_port_ds *port_ds = GPTP_PORT_DS(port);

		/* If interface is down, don't move forward */
		if (net_if_flag_is_set(GPTP_PORT_IFACE(port), NET_IF_UP)) {
			switch (GPTP_GLOBAL_DS()->selected_role[port]) {
			case GPTP_PORT_DISABLED:
			case GPTP_PORT_MASTER:
			case GPTP_PORT_PASSIVE:
			case GPTP_PORT_SLAVE:
				gptp_md_state_machines(port);
				gptp_mi_port_sync_state_machines(port);
				gptp_mi_port_bmca_state_machines(port);
				break;
			default:
				NET_DBG("%s: Unknown port state", __func__);
				break;
			}
		} else {
			GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_DISABLED;
		}

		port_ds->prev_ptt_port_enabled = port_ds->ptt_port_enabled;
	}

	gptp_mi_state_machines();
}

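/*
 * Main gPTP thread: initialize the clock and port data sets, then loop,
 * alternating between handling queued RX messages and advancing the state
 * machines. The timed wait on the FIFO bounds how long a state machine
 * pass can be delayed when no packets arrive.
 */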
static void gptp_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int port;

	NET_DBG("Starting PTP thread");

	gptp_init_clock_ds();

	for (port = GPTP_PORT_START; port < GPTP_PORT_END; port++) {
		gptp_init_port_ds(port);
		gptp_change_port_state(port, GPTP_PORT_DISABLED);
	}

	while (1) {
		struct net_pkt *pkt;

		pkt = k_fifo_get(&gptp_rx_queue,
				 K_MSEC(GPTP_THREAD_WAIT_TIMEOUT_MS));
		if (pkt) {
			gptp_handle_msg(pkt);
			net_pkt_unref(pkt);
		}

		gptp_state_machine();
	}
}


static void gptp_add_port(struct net_if *iface, void *user_data)
{
	int *num_ports = user_data;
	const struct device *clk;

	if (*num_ports >= CONFIG_NET_GPTP_NUM_PORTS) {
		return;
	}

#if defined(CONFIG_NET_GPTP_VLAN)
	if (CONFIG_NET_GPTP_VLAN_TAG >= 0 &&
	    CONFIG_NET_GPTP_VLAN_TAG < NET_VLAN_TAG_UNSPEC) {
		struct net_if *vlan_iface;

		vlan_iface = net_eth_get_vlan_iface(iface,
						    CONFIG_NET_GPTP_VLAN_TAG);
		if (vlan_iface != iface) {
			return;
		}
	}
#endif /* CONFIG_NET_GPTP_VLAN */

	/* Check if interface has a PTP clock. */
	clk = net_eth_get_ptp_clock(iface);
	if (clk) {
		gptp_domain.iface[*num_ports] = iface;
		net_eth_set_ptp_port(iface, *num_ports);
		(*num_ports)++;
	}
}

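/*
 * Compute the interval seconds * 2^log_msg_interval and store it in the
 * uscaled nanosecond format used by gPTP, i.e. nanoseconds scaled by 2^16
 * spread over the 64-bit low and 32-bit high words. Out-of-range exponents
 * saturate to zero or to the maximum representable value.
 */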
void gptp_set_time_itv(struct gptp_uscaled_ns *interval,
		       uint16_t seconds,
		       int8_t log_msg_interval)
{
	int i;

	if (seconds == 0U) {
		interval->low = 0U;
		interval->high = 0U;
		return;
	} else if (log_msg_interval >= 96) {
		/* Overflow, set maximum. */
		interval->low = UINT64_MAX;
		interval->high = UINT32_MAX;

		return;
	} else if (log_msg_interval <= -64) {
		/* Underflow, set to 0. */
		interval->low = 0U;
		interval->high = 0U;
		return;
	}


	/* NSEC_PER_SEC is between 2^30 and 2^31, seconds is less than 2^16,
	 * thus the computation will be less than 2^63.
	 */
	interval->low = (seconds * (uint64_t)NSEC_PER_SEC) << 16;

	if (log_msg_interval <= 0) {
		interval->low >>= -log_msg_interval;
		interval->high = 0U;
	} else {
		/* Find highest bit set. */
		for (i = 63; i >= 0; i--) {
			if (interval->low >> i) {
				break;
			}
		}

		if ((i + log_msg_interval) >= 96 || log_msg_interval > 64) {
			/* Overflow, set maximum. */
			interval->low = UINT64_MAX;
			interval->high = UINT32_MAX;
		} else {
			interval->high =
				interval->low >> (64 - log_msg_interval);

			/* << operator is undefined if the shift value is equal
			 * to the number of bits in the left expression's type
			 */
			if (log_msg_interval == 64) {
				interval->low = 0U;
			} else {
				interval->low <<= log_msg_interval;
			}
		}
	}
}

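/*
 * Convert an uscaled nanosecond interval to a millisecond timer value:
 * drop the 16 fractional bits, divide by 1000000 and clamp the result to
 * the [1, INT32_MAX] range expected by the kernel timers.
 */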
int32_t gptp_uscaled_ns_to_timer_ms(struct gptp_uscaled_ns *usns)
{
	uint64_t tmp;

	if (usns->high) {
		/* Do not calculate, it reaches max value. */
		return INT32_MAX;
	}

	tmp = (usns->low >> 16) / USEC_PER_SEC;
	if (tmp == 0U) {
		/* Timer must be started with a minimum value of 1. */
		return 1;
	}

	if (tmp > INT32_MAX) {
		return INT32_MAX;
	}

	return (tmp & INT32_MAX);

}

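/*
 * Read the time left on a timer and stop it, with interrupts locked so the
 * two operations appear atomic with respect to the timer expiry. Used when
 * a message interval is updated and the timer has to be restarted with a
 * new period.
 */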
static int32_t timer_get_remaining_and_stop(struct k_timer *timer)
{
	unsigned int key;
	int32_t timer_value;

	key = irq_lock();
	timer_value = k_timer_remaining_get(timer);

	/* Stop timer as the period is about to be modified. */
	k_timer_stop(timer);
	irq_unlock(key);

	return timer_value;
}

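/*
 * Apply a signaled interval request to the current log interval: keep the
 * current value, reset it to the initial value, or set it to the requested
 * value (with an optional correction, e.g. -1 for the half-sync interval).
 * Returns the resulting interval converted to milliseconds.
 */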
static int32_t update_itv(struct gptp_uscaled_ns *itv,
			 int8_t *cur_log_itv,
			 int8_t *ini_log_itv,
			 int8_t new_log_itv,
			 int8_t correction_log_itv)
{
	switch (new_log_itv) {
	case GPTP_ITV_KEEP:
		break;
	case GPTP_ITV_SET_TO_INIT:
		*cur_log_itv = *ini_log_itv;
		gptp_set_time_itv(itv, 1, *ini_log_itv);
		break;
	case GPTP_ITV_STOP:
	default:
		*cur_log_itv = new_log_itv + correction_log_itv;
		gptp_set_time_itv(itv, 1, *cur_log_itv);
		break;
	}

	return gptp_uscaled_ns_to_timer_ms(itv);
}

void gptp_update_pdelay_req_interval(int port, int8_t log_val)
{
	int32_t remaining;
	int32_t new_itv, old_itv;
	struct gptp_pdelay_req_state *state_pdelay;
	struct gptp_port_ds *port_ds;

	port_ds = GPTP_PORT_DS(port);
	state_pdelay = &GPTP_PORT_STATE(port)->pdelay_req;
	remaining = timer_get_remaining_and_stop(&state_pdelay->pdelay_timer);

	old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->pdelay_req_itv);
	new_itv = update_itv(&port_ds->pdelay_req_itv,
			     &port_ds->cur_log_pdelay_req_itv,
			     &port_ds->ini_log_pdelay_req_itv,
			     log_val,
			     0);

	new_itv -= (old_itv - remaining);
	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_pdelay->pdelay_timer, K_MSEC(new_itv), K_NO_WAIT);
}

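/*
 * Update the sync interval while the half-sync timer is running: the time
 * already spent in the current (full) sync period is carried over, the
 * expired flags are recomputed against the new half interval, and the
 * timer is restarted with the remaining portion of the new period.
 */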
void gptp_update_sync_interval(int port, int8_t log_val)
{
	struct gptp_pss_send_state *state_pss_send;
	struct gptp_port_ds *port_ds;
	int32_t new_itv, old_itv, period;
	int32_t remaining;
	uint32_t time_spent;

	port_ds = GPTP_PORT_DS(port);
	state_pss_send = &GPTP_PORT_STATE(port)->pss_send;
	remaining =
		timer_get_remaining_and_stop(
				&state_pss_send->half_sync_itv_timer);
	old_itv = gptp_uscaled_ns_to_timer_ms(&port_ds->half_sync_itv);
	new_itv = update_itv(&port_ds->half_sync_itv,
			     &port_ds->cur_log_half_sync_itv,
			     &port_ds->ini_log_half_sync_itv,
			     log_val,
			     -1);
	period = new_itv;

	/* Get the time spent from the start of the timer. */
	time_spent = old_itv;
	if (state_pss_send->half_sync_itv_timer_expired) {
		time_spent *= 2U;
	}
	time_spent -= remaining;

	/* Calculate remaining time and if half timer has expired. */
	if ((time_spent / 2U) > new_itv) {
		state_pss_send->sync_itv_timer_expired = true;
		state_pss_send->half_sync_itv_timer_expired = true;
		new_itv = 1;
	} else if (time_spent > new_itv) {
		state_pss_send->sync_itv_timer_expired = false;
		state_pss_send->half_sync_itv_timer_expired = true;
		new_itv -= (time_spent - new_itv);
	} else {
		state_pss_send->sync_itv_timer_expired = false;
		state_pss_send->half_sync_itv_timer_expired = false;
		new_itv -= time_spent;
	}

	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_pss_send->half_sync_itv_timer, K_MSEC(new_itv),
		      K_MSEC(period));
}

void gptp_update_announce_interval(int port, int8_t log_val)
{
	int32_t remaining;
	int32_t new_itv, old_itv;
	struct gptp_port_announce_transmit_state *state_ann;
	struct gptp_port_bmca_data *bmca_data;
	struct gptp_port_ds *port_ds;

	port_ds = GPTP_PORT_DS(port);
	state_ann = &GPTP_PORT_STATE(port)->pa_transmit;
	bmca_data = GPTP_PORT_BMCA_DATA(port);
	remaining = timer_get_remaining_and_stop(
			&state_ann->ann_send_periodic_timer);

	old_itv = gptp_uscaled_ns_to_timer_ms(&bmca_data->announce_interval);
	new_itv = update_itv(&bmca_data->announce_interval,
			     &port_ds->cur_log_announce_itv,
			     &port_ds->ini_log_announce_itv,
			     log_val,
			     0);

	new_itv -= (old_itv - remaining);
	if (new_itv <= 0) {
		new_itv = 1;
	}

	k_timer_start(&state_ann->ann_send_periodic_timer, K_MSEC(new_itv),
		      K_NO_WAIT);
}

struct port_user_data {
	gptp_port_cb_t cb;
	void *user_data;
};

static void gptp_get_port(struct net_if *iface, void *user_data)
{
	struct port_user_data *ud = user_data;
	const struct device *clk;

	/* Check if interface has a PTP clock. */
	clk = net_eth_get_ptp_clock(iface);
	if (clk) {
		int port = gptp_get_port_number(iface);

		if (port < 0) {
			return;
		}

		ud->cb(port, iface, ud->user_data);
	}
}

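/*
 * Usage sketch (illustrative only, not part of this file): an application
 * can walk all gPTP capable ports like this, assuming a callback matching
 * the gptp_port_cb_t signature:
 *
 *   static void port_cb(int port, struct net_if *iface, void *user_data)
 *   {
 *           printk("gPTP port %d runs on iface %p\n", port, iface);
 *   }
 *
 *   gptp_foreach_port(port_cb, NULL);
 */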
void gptp_foreach_port(gptp_port_cb_t cb, void *user_data)
{
	struct port_user_data ud = {
		.cb = cb,
		.user_data = user_data
	};

	net_if_foreach(gptp_get_port, &ud);
}

struct gptp_domain *gptp_get_domain(void)
{
	return &gptp_domain;
}

int gptp_get_port_data(struct gptp_domain *domain,
		       int port,
		       struct gptp_port_ds **port_ds,
		       struct gptp_port_param_ds **port_param_ds,
		       struct gptp_port_states **port_state,
		       struct gptp_port_bmca_data **port_bmca_data,
		       struct net_if **iface)
{
	if (domain != &gptp_domain) {
		return -ENOENT;
	}

	if (port < GPTP_PORT_START || port >= GPTP_PORT_END) {
		return -EINVAL;
	}

	if (port_ds) {
		*port_ds = GPTP_PORT_DS(port);
	}

	if (port_param_ds) {
#if defined(CONFIG_NET_GPTP_STATISTICS)
		*port_param_ds = GPTP_PORT_PARAM_DS(port);
#else
		*port_param_ds = NULL;
#endif
	}

	if (port_state) {
		*port_state = GPTP_PORT_STATE(port);
	}

	if (port_bmca_data) {
		*port_bmca_data = GPTP_PORT_BMCA_DATA(port);
	}

	if (iface) {
		*iface = GPTP_PORT_IFACE(port);
	}

	return 0;
}

static void init_ports(void)
{
	net_if_foreach(gptp_add_port, &gptp_domain.default_ds.nb_ports);

	/* Only initialize the state machine once the ports are known. */
	gptp_init_state_machine();

	tid = k_thread_create(&gptp_thread_data, gptp_stack,
			      K_KERNEL_STACK_SIZEOF(gptp_stack),
			      gptp_thread,
			      NULL, NULL, NULL, K_PRIO_COOP(5), 0, K_NO_WAIT);
	k_thread_name_set(&gptp_thread_data, "gptp");
}

#if defined(CONFIG_NET_GPTP_VLAN)
static struct net_mgmt_event_callback vlan_cb;

struct vlan_work {
	struct k_work work;
	struct net_if *iface;
} vlan;

static void disable_port(int port)
{
	GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_DISABLED;

	gptp_state_machine();
}

static void vlan_enabled(struct k_work *work)
{
	struct vlan_work *one_vlan = CONTAINER_OF(work,
						  struct vlan_work,
						  work);
	if (tid) {
		int port;

		port = gptp_get_port_number(one_vlan->iface);
		if (port < 0) {
			NET_DBG("No port found for iface %p", one_vlan->iface);
			return;
		}

		GPTP_GLOBAL_DS()->selected_role[port] = GPTP_PORT_SLAVE;

		gptp_state_machine();
	} else {
		init_ports();
	}
}

static void vlan_disabled(struct k_work *work)
{
	struct vlan_work *one_vlan = CONTAINER_OF(work,
						  struct vlan_work,
						  work);
	int port;

	port = gptp_get_port_number(one_vlan->iface);
	if (port < 0) {
		NET_DBG("No port found for iface %p", one_vlan->iface);
		return;
	}

	disable_port(port);
}

static void vlan_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event,
			       struct net_if *iface)
{
	uint16_t tag;

	if (mgmt_event != NET_EVENT_ETHERNET_VLAN_TAG_ENABLED &&
	    mgmt_event != NET_EVENT_ETHERNET_VLAN_TAG_DISABLED) {
		return;
	}

#if defined(CONFIG_NET_MGMT_EVENT_INFO)
	if (!cb->info) {
		return;
	}

	tag = *((uint16_t *)cb->info);
	if (tag != CONFIG_NET_GPTP_VLAN_TAG) {
		return;
	}

	vlan.iface = iface;

	if (mgmt_event == NET_EVENT_ETHERNET_VLAN_TAG_ENABLED) {
		/* We found the right tag, now start gPTP for this interface */
		k_work_init(&vlan.work, vlan_enabled);

		NET_DBG("VLAN tag %d %s for iface %p", tag, "enabled", iface);
	} else {
		k_work_init(&vlan.work, vlan_disabled);

		NET_DBG("VLAN tag %d %s for iface %p", tag, "disabled", iface);
	}

	k_work_submit(&vlan.work);
#else
	NET_WARN("VLAN event but tag info missing!");

	ARG_UNUSED(tag);
#endif
}

static void setup_vlan_events_listener(void)
{
	net_mgmt_init_event_callback(&vlan_cb, vlan_event_handler,
				     NET_EVENT_ETHERNET_VLAN_TAG_ENABLED |
				     NET_EVENT_ETHERNET_VLAN_TAG_DISABLED);
	net_mgmt_add_event_callback(&vlan_cb);
}
#endif /* CONFIG_NET_GPTP_VLAN */

void net_gptp_init(void)
{
	gptp_domain.default_ds.nb_ports = 0U;

#if defined(CONFIG_NET_GPTP_VLAN)
	/* If the user has enabled gPTP over VLAN support, gPTP is only
	 * started after the matching "VLAN tag enabled" event is received.
	 */
	if (CONFIG_NET_GPTP_VLAN_TAG >= 0 &&
	    CONFIG_NET_GPTP_VLAN_TAG < NET_VLAN_TAG_UNSPEC) {
		setup_vlan_events_listener();
	} else {
		NET_WARN("VLAN tag %d set but the value is not valid.",
			 CONFIG_NET_GPTP_VLAN_TAG);

		init_ports();
	}
#else
	init_ports();
#endif
}