/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * PPP driver using uart_pipe. This is meant for network connectivity between
 * two network endpoints.
 */

#define LOG_LEVEL CONFIG_NET_PPP_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_ppp, LOG_LEVEL);

#include <stdio.h>

#include <zephyr/kernel.h>

#include <stdbool.h>
#include <errno.h>
#include <stddef.h>
#include <zephyr/net/ppp.h>
#include <zephyr/net_buf.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_core.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/crc.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/random/random.h>
#include <zephyr/posix/net/if_arp.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/capture.h>

#include "../../subsys/net/ip/net_stats.h"
#include "../../subsys/net/ip/net_private.h"

#define UART_BUF_LEN CONFIG_NET_PPP_UART_BUF_LEN
#define UART_TX_BUF_LEN CONFIG_NET_PPP_ASYNC_UART_TX_BUF_LEN

enum ppp_driver_state {
	STATE_HDLC_FRAME_START,
	STATE_HDLC_FRAME_ADDRESS,
	STATE_HDLC_FRAME_DATA,
};
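
/*
 * A minimal sketch of an HDLC-framed PPP packet on the wire (RFC 1662):
 *
 *   0x7e | Address 0xff | Control 0x03 | Protocol | Information | FCS | 0x7e
 *
 * The states above track how far into this layout the receiver is;
 * the flag, escape and FCS handling live in ppp_input_byte() and
 * ppp_check_fcs() below.
 */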

#define PPP_WORKQ_PRIORITY CONFIG_NET_PPP_RX_PRIORITY
#define PPP_WORKQ_STACK_SIZE CONFIG_NET_PPP_RX_STACK_SIZE

K_KERNEL_STACK_DEFINE(ppp_workq, PPP_WORKQ_STACK_SIZE);

#if defined(CONFIG_NET_PPP_CAPTURE)
#define MAX_CAPTURE_BUF_LEN CONFIG_NET_PPP_CAPTURE_BUF_SIZE
#else
#define MAX_CAPTURE_BUF_LEN 1
#endif

struct net_ppp_capture_ctx {
	struct net_capture_cooked cooked;
	uint8_t capture_buf[MAX_CAPTURE_BUF_LEN];
};

#if defined(CONFIG_NET_PPP_CAPTURE)
static struct net_ppp_capture_ctx _ppp_capture_ctx;
static struct net_ppp_capture_ctx *ppp_capture_ctx = &_ppp_capture_ctx;
#else
static struct net_ppp_capture_ctx *ppp_capture_ctx;
#endif

struct ppp_driver_context {
	const struct device *dev;
	struct net_if *iface;

	/* The net_pkt that is currently being filled with received data */
	struct net_pkt *pkt;

	/* How much free space we have in the net_pkt */
	size_t available;

	/* PPP data is read into this buffer */
	uint8_t buf[UART_BUF_LEN];
#if defined(CONFIG_NET_PPP_ASYNC_UART)
	/* With async UART we use two RX buffers */
	uint8_t buf2[UART_BUF_LEN];
	struct k_work_delayable uart_recovery_work;

	/* Buffer used when sending PPP data */
	uint8_t send_buf[UART_TX_BUF_LEN];
#else
	/* Buffer used when sending PPP data */
	uint8_t send_buf[UART_BUF_LEN];
#endif

	uint8_t mac_addr[6];
	struct net_linkaddr ll_addr;

	/* Flag that tells whether this instance is initialized or not */
	atomic_t modem_init_done;

	/* Incoming data is routed via ring buffer */
	struct ring_buf rx_ringbuf;
	uint8_t rx_buf[CONFIG_NET_PPP_RINGBUF_SIZE];

	/* ISR function callback worker */
	struct k_work cb_work;
	struct k_work_q cb_workq;

#if defined(CONFIG_NET_STATISTICS_PPP)
	struct net_stats_ppp stats;
#endif
	enum ppp_driver_state state;

#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
	/* Number of correctly received CLIENT bytes so far */
	uint8_t client_index;
#endif

	uint8_t init_done : 1;
	uint8_t next_escaped : 1;
};

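/*
 * RX data path in a nutshell: the UART ISR (or async UART callback)
 * copies raw bytes into rx_ringbuf; cb_work, running on cb_workq, then
 * drains the ring buffer through the HDLC state machine and hands
 * complete, FCS-checked frames to the network stack via net_recv_data().
 */
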
static struct ppp_driver_context ppp_driver_context_data;

#if defined(CONFIG_NET_PPP_ASYNC_UART)
static bool rx_retry_pending;
static bool uart_recovery_pending;
static uint8_t *next_buf;

static K_SEM_DEFINE(uarte_tx_finished, 0, 1);

static void uart_callback(const struct device *dev,
			  struct uart_event *evt,
			  void *user_data)
{
	struct ppp_driver_context *context = user_data;
	uint8_t *p;
	int err, ret, len, space_left;

	switch (evt->type) {
	case UART_TX_DONE:
		LOG_DBG("UART_TX_DONE: sent %zu bytes", evt->data.tx.len);
		k_sem_give(&uarte_tx_finished);
		break;

	case UART_TX_ABORTED:
	{
		k_sem_give(&uarte_tx_finished);
		if (CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT == 0) {
			LOG_WRN("UART TX aborted.");
			break;
		}
		struct uart_config uart_conf;

		err = uart_config_get(dev, &uart_conf);
		if (err) {
			LOG_ERR("uart_config_get() err: %d", err);
		} else if (uart_conf.baudrate / 10 * CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT
			  / MSEC_PER_SEC > evt->data.tx.len * 2) {
			/* The abort was likely not caused by a lack of bandwidth. */
			LOG_DBG("UART_TX_ABORTED");
		} else {
			LOG_WRN("UART TX aborted: Only %zu bytes were sent. You may want"
				" to change either CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT"
				" (%d ms) or the UART baud rate (%u).", evt->data.tx.len,
				CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT, uart_conf.baudrate);
		}
		break;
	}

	case UART_RX_RDY:
		len = evt->data.rx.len;
		p = evt->data.rx.buf + evt->data.rx.offset;

		LOG_DBG("Received data %d bytes", len);

		ret = ring_buf_put(&context->rx_ringbuf, p, len);
		if (ret < evt->data.rx.len) {
			LOG_WRN("Rx buffer doesn't have enough space. "
				"Bytes pending: %d, written only: %d. "
				"Disabling RX for now.",
				evt->data.rx.len, ret);

			/* There is no way to turn flow control on towards the
			 * PC in the async API, so work around that by turning
			 * RX off for now and re-enabling it later.
			 */
			if (!rx_retry_pending) {
				uart_rx_disable(dev);
				rx_retry_pending = true;
			}
		}

		space_left = ring_buf_space_get(&context->rx_ringbuf);
		if (!rx_retry_pending && space_left < (sizeof(context->rx_buf) / 8)) {
			/* Not much room left in the ring buffer after this
			 * write. Submit the work as usual, but also disable
			 * RX here to avoid packet losses.
			 */
			uart_rx_disable(dev);
			rx_retry_pending = true;
			LOG_WRN("%d written to RX buf, but after that only %d space left. "
				"Disabling RX for now.",
				ret, space_left);
		}

		k_work_submit_to_queue(&context->cb_workq, &context->cb_work);
		break;

	case UART_RX_BUF_REQUEST:
	{
		LOG_DBG("UART_RX_BUF_REQUEST: buf %p", (void *)next_buf);

		if (next_buf) {
			err = uart_rx_buf_rsp(dev, next_buf, sizeof(context->buf));
			if (err) {
				LOG_ERR("uart_rx_buf_rsp() err: %d", err);
			}
		}

		break;
	}

	case UART_RX_BUF_RELEASED:
		next_buf = evt->data.rx_buf.buf;
		LOG_DBG("UART_RX_BUF_RELEASED: buf %p", (void *)next_buf);
		break;

	case UART_RX_DISABLED:
		LOG_DBG("UART_RX_DISABLED - re-enabling in a while");

		if (rx_retry_pending && !uart_recovery_pending) {
			k_work_schedule(&context->uart_recovery_work,
					K_MSEC(CONFIG_NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT));
			rx_retry_pending = false;
			uart_recovery_pending = true;
		}
		break;

	case UART_RX_STOPPED:
		LOG_DBG("UART_RX_STOPPED: stop reason %d", evt->data.rx_stop.reason);

		if (evt->data.rx_stop.reason != 0) {
			rx_retry_pending = true;
		}
		break;
	}
}

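/*
 * The async RX path double-buffers: reception starts in context->buf
 * while context->buf2 is queued as next_buf; the UART_RX_BUF_REQUEST
 * and UART_RX_BUF_RELEASED events above then rotate the two buffers.
 */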
static int ppp_async_uart_rx_enable(struct ppp_driver_context *context)
{
	int err;

	next_buf = context->buf2;
	err = uart_callback_set(context->dev, uart_callback, (void *)context);
	if (err) {
		LOG_ERR("Failed to set uart callback, err %d", err);
	}

	err = uart_rx_enable(context->dev, context->buf, sizeof(context->buf),
			     CONFIG_NET_PPP_ASYNC_UART_RX_ENABLE_TIMEOUT * USEC_PER_MSEC);
	if (err) {
		LOG_ERR("uart_rx_enable() failed, err %d", err);
	} else {
		LOG_DBG("RX enabled");
	}
	rx_retry_pending = false;
	return err;
}

static void uart_recovery(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct ppp_driver_context *ppp =
		CONTAINER_OF(dwork, struct ppp_driver_context, uart_recovery_work);
	int ret;

	ret = ring_buf_space_get(&ppp->rx_ringbuf);
	if (ret >= (sizeof(ppp->rx_buf) / 2)) {
		ret = ppp_async_uart_rx_enable(ppp);
		if (ret) {
			LOG_ERR("ppp_async_uart_rx_enable() failed, err %d", ret);
		} else {
			LOG_DBG("UART RX recovered.");
		}
		uart_recovery_pending = false;
	} else {
		LOG_ERR("Rx buffer still doesn't have enough room %d to be re-enabled", ret);
		k_work_schedule(&ppp->uart_recovery_work,
				K_MSEC(CONFIG_NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT));
	}
}
#endif
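
/*
 * A minimal prj.conf sketch for the async UART variant, using only the
 * options referenced in this file (the values shown are illustrative
 * assumptions, not recommendations):
 *
 *	CONFIG_NET_PPP_ASYNC_UART=y
 *	CONFIG_NET_PPP_ASYNC_UART_TX_BUF_LEN=512
 *	CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT=1000
 *	CONFIG_NET_PPP_ASYNC_UART_RX_ENABLE_TIMEOUT=100
 *	CONFIG_NET_PPP_ASYNC_UART_RX_RECOVERY_TIMEOUT=100
 */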

static int ppp_save_byte(struct ppp_driver_context *ppp, uint8_t byte)
{
	int ret;

	if (!ppp->pkt) {
		ppp->pkt = net_pkt_rx_alloc_with_buffer(
			ppp->iface,
			CONFIG_NET_BUF_DATA_SIZE,
			AF_UNSPEC, 0, K_NO_WAIT);
		if (!ppp->pkt) {
			LOG_ERR("[%p] cannot allocate pkt", ppp);
			return -ENOMEM;
		}

		net_pkt_cursor_init(ppp->pkt);

		ppp->available = net_pkt_available_buffer(ppp->pkt);
	}

	/* Extra debugging can be enabled separately if really
	 * needed. Normally it would just print too much data.
	 */
	if (0) {
		LOG_DBG("Saving byte %02x", byte);
	}

	/* This is not very intuitive, but we must allocate a new buffer
	 * before writing a byte to the last available cursor position.
	 */
	if (ppp->available == 1) {
		ret = net_pkt_alloc_buffer(ppp->pkt,
					   CONFIG_NET_BUF_DATA_SIZE + ppp->available,
					   AF_UNSPEC, K_NO_WAIT);
		if (ret < 0) {
			LOG_ERR("[%p] cannot allocate new data buffer", ppp);
			goto out_of_mem;
		}

		ppp->available = net_pkt_available_buffer(ppp->pkt);
	}

	if (ppp->available) {
		ret = net_pkt_write_u8(ppp->pkt, byte);
		if (ret < 0) {
			LOG_ERR("[%p] Cannot write to pkt %p (%d)",
				ppp, ppp->pkt, ret);
			goto out_of_mem;
		}

		ppp->available--;
	}

	return 0;

out_of_mem:
	net_pkt_unref(ppp->pkt);
	ppp->pkt = NULL;
	return -ENOMEM;
}

static const char *ppp_driver_state_str(enum ppp_driver_state state)
{
#if (CONFIG_NET_PPP_LOG_LEVEL >= LOG_LEVEL_DBG)
	switch (state) {
	case STATE_HDLC_FRAME_START:
		return "START";
	case STATE_HDLC_FRAME_ADDRESS:
		return "ADDRESS";
	case STATE_HDLC_FRAME_DATA:
		return "DATA";
	}
#else
	ARG_UNUSED(state);
#endif

	return "";
}

static void ppp_change_state(struct ppp_driver_context *ctx,
			     enum ppp_driver_state new_state)
{
	NET_ASSERT(ctx);

	if (ctx->state == new_state) {
		return;
	}

	NET_ASSERT(new_state >= STATE_HDLC_FRAME_START &&
		   new_state <= STATE_HDLC_FRAME_DATA);

	NET_DBG("[%p] state %s (%d) => %s (%d)",
		ctx, ppp_driver_state_str(ctx->state), ctx->state,
		ppp_driver_state_str(new_state), new_state);

	ctx->state = new_state;
}

static int ppp_send_flush(struct ppp_driver_context *ppp, int off)
{
	if (IS_ENABLED(CONFIG_NET_TEST)) {
		return 0;
	}
	uint8_t *buf = ppp->send_buf;

	if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE) &&
	    net_capture_is_enabled(NULL) && ppp_capture_ctx) {
		size_t len = off;
		uint8_t *start = &buf[0];

		/* Do not capture HDLC frame start and stop bytes (0x7e) */

		if (buf[0] == 0x7e) {
			len--;
			start++;
		}

		if (buf[off - 1] == 0x7e) {
			len--;
		}

		net_capture_data(&ppp_capture_ctx->cooked,
				 start, len,
				 NET_CAPTURE_OUTGOING,
				 NET_ETH_PTYPE_HDLC);
	}

#if defined(CONFIG_NET_PPP_ASYNC_UART)
	int ret;
	const int32_t timeout = CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT
				? CONFIG_NET_PPP_ASYNC_UART_TX_TIMEOUT * USEC_PER_MSEC
				: SYS_FOREVER_US;

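	/* Wait until any previous uart_tx() has completed; the semaphore
	 * is given initially in ppp_start() and re-given from the
	 * UART_TX_DONE and UART_TX_ABORTED events.
	 */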
	k_sem_take(&uarte_tx_finished, K_FOREVER);

	ret = uart_tx(ppp->dev, buf, off, timeout);
	if (ret) {
		LOG_ERR("uart_tx() failed, err %d", ret);
		k_sem_give(&uarte_tx_finished);
	}
#else
	while (off--) {
		uart_poll_out(ppp->dev, *buf++);
	}
#endif

	return 0;
}

static int ppp_send_bytes(struct ppp_driver_context *ppp,
			  const uint8_t *data, int len, int off)
{
	int i;

	for (i = 0; i < len; i++) {
		ppp->send_buf[off++] = data[i];

		if (off >= sizeof(ppp->send_buf)) {
			off = ppp_send_flush(ppp, off);
		}
	}

	return off;
}

#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)

#define CLIENT "CLIENT"
#define CLIENTSERVER "CLIENTSERVER"

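/*
 * Some peers (e.g. Windows dial-up networking over a direct/null-modem
 * cable) repeatedly send the ASCII string "CLIENT" before starting
 * PPP; replying "CLIENTSERVER" tells such a peer it may proceed.
 */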
static void ppp_handle_client(struct ppp_driver_context *ppp, uint8_t byte)
{
	static const char *client = CLIENT;
	static const char *clientserver = CLIENTSERVER;
	int offset;

	if (ppp->client_index >= (sizeof(CLIENT) - 1)) {
		ppp->client_index = 0;
	}

	if (byte != client[ppp->client_index]) {
		ppp->client_index = 0;
		if (byte != client[ppp->client_index]) {
			return;
		}
	}

	++ppp->client_index;
	if (ppp->client_index >= (sizeof(CLIENT) - 1)) {
		LOG_DBG("Received complete CLIENT string");
		offset = ppp_send_bytes(ppp, clientserver,
					sizeof(CLIENTSERVER) - 1, 0);
		(void)ppp_send_flush(ppp, offset);
		ppp->client_index = 0;
	}
}
#endif

static int ppp_input_byte(struct ppp_driver_context *ppp, uint8_t byte)
{
	int ret = -EAGAIN;

	switch (ppp->state) {
	case STATE_HDLC_FRAME_START:
		/* Synchronizing the flow with the HDLC flag field */
		if (byte == 0x7e) {
			/* Note that we do not save the sync flag */
			LOG_DBG("Sync byte (0x%02x) start", byte);
			ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS);
#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
		} else {
			ppp_handle_client(ppp, byte);
#endif
		}

		break;

	case STATE_HDLC_FRAME_ADDRESS:
		if (byte != 0xff) {
			/* Check if we need to sync again */
			if (byte == 0x7e) {
				/* Back-to-back flag byte; keep waiting for
				 * the address field.
				 */
				return -EAGAIN;
			}

			LOG_DBG("Invalid (0x%02x) byte, expecting Address",
				byte);

			/* If the address is != 0xff, then ignore this
			 * frame. RFC 1662, ch. 3.1
			 */
			ppp_change_state(ppp, STATE_HDLC_FRAME_START);
		} else {
			LOG_DBG("Address byte (0x%02x) start", byte);

			ppp_change_state(ppp, STATE_HDLC_FRAME_DATA);

			/* Save the address field so that we can calculate
			 * the FCS. The address field will not be passed
			 * to the upper stack.
			 */
			ret = ppp_save_byte(ppp, byte);
			if (ret < 0) {
				ppp_change_state(ppp, STATE_HDLC_FRAME_START);
			}

			ret = -EAGAIN;
		}

		break;

	case STATE_HDLC_FRAME_DATA:
		/* If the next frame starts, then send this one
		 * up in the network stack.
		 */
		if (byte == 0x7e) {
			LOG_DBG("End of pkt (0x%02x)", byte);
			ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS);
			ret = 0;
		} else {
			if (byte == 0x7d) {
				/* RFC 1662, ch. 4.2 */
				ppp->next_escaped = true;
				break;
			}

			if (ppp->next_escaped) {
				/* RFC 1662, ch. 4.2 */
				byte ^= 0x20;
				ppp->next_escaped = false;
			}

			ret = ppp_save_byte(ppp, byte);
			if (ret < 0) {
				ppp_change_state(ppp, STATE_HDLC_FRAME_START);
			}

			ret = -EAGAIN;
		}

		break;

	default:
		LOG_ERR("[%p] Invalid state %d", ppp, ppp->state);
		break;
	}

	return ret;
}

static bool ppp_check_fcs(struct ppp_driver_context *ppp)
{
	struct net_buf *buf;
	uint16_t crc;

	buf = ppp->pkt->buffer;
	if (!buf) {
		return false;
	}

	crc = crc16_ccitt(0xffff, buf->data, buf->len);

	buf = buf->frags;

	while (buf) {
		crc = crc16_ccitt(crc, buf->data, buf->len);
		buf = buf->frags;
	}

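	/* Per RFC 1662, running the FCS-16 over the whole frame, received
	 * FCS field included, yields the constant 0xf0b8 when the frame
	 * is error free.
	 */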
	if (crc != 0xf0b8) {
		LOG_DBG("Invalid FCS (0x%x)", crc);
#if defined(CONFIG_NET_STATISTICS_PPP)
		ppp->stats.chkerr++;
#endif
		return false;
	}

	return true;
}

static void ppp_process_msg(struct ppp_driver_context *ppp)
{
	if (LOG_LEVEL >= LOG_LEVEL_DBG) {
		net_pkt_hexdump(ppp->pkt, "recv ppp");
	}

	if (IS_ENABLED(CONFIG_NET_PPP_VERIFY_FCS) && !ppp_check_fcs(ppp)) {
#if defined(CONFIG_NET_STATISTICS_PPP)
		ppp->stats.drop++;
		ppp->stats.pkts.rx++;
#endif
		net_pkt_unref(ppp->pkt);
	} else {
		/* If PPP packet capturing is enabled, then send the
		 * full packet with PPP headers for processing. Currently
		 * only valid frames are captured. If invalid frames also
		 * needed to be captured, this if-block would have to be
		 * moved before the FCS check above.
		 */
		if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE) &&
		    net_capture_is_enabled(NULL) && ppp_capture_ctx) {
			size_t copied;

			/* Linearize the packet data. We cannot use the
			 * capture API that deals with net_pkt as we work
			 * in cooked mode and want to capture also the
			 * HDLC frame data.
			 */
			copied = net_buf_linearize(ppp_capture_ctx->capture_buf,
						   sizeof(ppp_capture_ctx->capture_buf),
						   ppp->pkt->buffer,
						   0U,
						   net_pkt_get_len(ppp->pkt));

			net_capture_data(&ppp_capture_ctx->cooked,
					 ppp_capture_ctx->capture_buf,
					 copied,
					 NET_CAPTURE_HOST,
					 NET_ETH_PTYPE_HDLC);
		}

		/* Remove the Address (0xff), Control (0x03) and 16-bit
		 * FCS fields as the PPP L2 layer does not need those
		 * bytes.
		 */
		uint16_t addr_and_ctrl = net_buf_pull_be16(ppp->pkt->buffer);

		/* Currently we do not support compressed Address and Control
		 * fields, so they must always be present.
		 */
		if (addr_and_ctrl != (0xff << 8 | 0x03)) {
#if defined(CONFIG_NET_STATISTICS_PPP)
			ppp->stats.drop++;
			ppp->stats.pkts.rx++;
#endif
			net_pkt_unref(ppp->pkt);
		} else {
			/* Remove the FCS bytes (2) */
			net_pkt_remove_tail(ppp->pkt, 2);

			/* Make sure we now start reading from the PPP header
			 * in the PPP L2 recv()
			 */
			net_pkt_cursor_init(ppp->pkt);
			net_pkt_set_overwrite(ppp->pkt, true);

			if (net_recv_data(ppp->iface, ppp->pkt) < 0) {
				net_pkt_unref(ppp->pkt);
			}
		}
	}

	ppp->pkt = NULL;
}

#if defined(CONFIG_NET_TEST)
static uint8_t *ppp_recv_cb(uint8_t *buf, size_t *off)
{
	struct ppp_driver_context *ppp =
		CONTAINER_OF(buf, struct ppp_driver_context, buf[0]);
	size_t i, len = *off;

	for (i = 0; i < *off; i++) {
		if (0) {
			/* Extra debugging can be enabled separately if really
			 * needed. Normally it would just print too much data.
			 */
			LOG_DBG("[%zd] %02x", i, buf[i]);
		}

		if (ppp_input_byte(ppp, buf[i]) == 0) {
			/* Ignore empty or too short frames */
			if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) {
				ppp_process_msg(ppp);
				break;
			}
		}
	}

	if (i == *off) {
		*off = 0;
	} else {
		*off = len - i - 1;

		memmove(&buf[0], &buf[i + 1], *off);
	}

	return buf;
}

void ppp_driver_feed_data(uint8_t *data, int data_len)
{
	struct ppp_driver_context *ppp = &ppp_driver_context_data;
	size_t recv_off = 0;

	/* We expect the tests to feed the data in large chunks, so the
	 * UART buffer can simply be reset here.
	 */
	memset(ppp->buf, 0, UART_BUF_LEN);

	ppp_change_state(ppp, STATE_HDLC_FRAME_START);

	while (data_len > 0) {
		int data_to_copy = MIN(data_len, UART_BUF_LEN);
		int remaining;

		LOG_DBG("Feeding %d bytes", data_to_copy);

		memcpy(ppp->buf, data, data_to_copy);

		recv_off = data_to_copy;

		(void)ppp_recv_cb(ppp->buf, &recv_off);

		remaining = data_to_copy - recv_off;

		LOG_DBG("We consumed %d bytes", remaining);

		data_len -= remaining;
		data += remaining;
	}
}
#endif

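/*
 * Compute the PPP FCS-16 (RFC 1662, appendix C): a CRC-16/CCITT with
 * initial value 0xffff, run over the Address, Control, (optional)
 * Protocol and Information fields, complemented at the end before it
 * is appended to the frame.
 */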
static bool calc_fcs(struct net_pkt *pkt, uint16_t *fcs, uint16_t protocol)
{
	struct net_buf *buf;
	uint16_t crc;
	uint16_t c;

	buf = pkt->buffer;
	if (!buf) {
		return false;
	}

	/* HDLC Address and Control fields */
	c = sys_cpu_to_be16(0xff << 8 | 0x03);

	crc = crc16_ccitt(0xffff, (const uint8_t *)&c, sizeof(c));

	if (protocol > 0) {
		crc = crc16_ccitt(crc, (const uint8_t *)&protocol,
				  sizeof(protocol));
	}

	while (buf) {
		crc = crc16_ccitt(crc, buf->data, buf->len);
		buf = buf->frags;
	}

	crc ^= 0xffff;
	*fcs = crc;

	return true;
}

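/*
 * RFC 1662, ch. 4.2 octet stuffing: 0x7e, 0x7d and control characters
 * (< 0x20) are transmitted as 0x7d followed by the byte XORed with
 * 0x20; for example, 0x7e becomes the two bytes 0x7d 0x5e on the wire.
 * The return value holds one or two significant bytes and *offset
 * tells the caller where they start within the 16-bit result.
 */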
static uint16_t ppp_escape_byte(uint8_t byte, int *offset)
{
	if (byte == 0x7e || byte == 0x7d || byte < 0x20) {
		*offset = 0;
		return (0x7d << 8) | (byte ^ 0x20);
	}

	*offset = 1;
	return byte;
}

static int ppp_send(const struct device *dev, struct net_pkt *pkt)
{
	struct ppp_driver_context *ppp = dev->data;
	struct net_buf *buf = pkt->buffer;
	uint16_t protocol = 0;
	int send_off = 0;
	uint32_t sync_addr_ctrl;
	uint16_t fcs, escaped;
	uint8_t byte;
	int i, offset;

#if defined(CONFIG_NET_TEST)
	return 0;
#endif

	ARG_UNUSED(dev);

	if (!buf) {
		/* No data? */
		return -ENODATA;
	}

	/* If the packet is a normal network packet, we must add the protocol
	 * value here.
	 */
	if (!net_pkt_is_ppp(pkt)) {
		if (net_pkt_family(pkt) == AF_INET) {
			protocol = htons(PPP_IP);
		} else if (net_pkt_family(pkt) == AF_INET6) {
			protocol = htons(PPP_IPV6);
		} else {
			return -EPROTONOSUPPORT;
		}
	}

	if (!calc_fcs(pkt, &fcs, protocol)) {
		return -ENOMEM;
	}

	/* Sync (0x7e), Address (0xff) and Control (0x03) fields; the
	 * Control byte is sent pre-escaped as 0x7d 0x23.
	 */
	sync_addr_ctrl = sys_cpu_to_be32(0x7e << 24 | 0xff << 16 |
					 0x7d << 8 | 0x23);
	send_off = ppp_send_bytes(ppp, (const uint8_t *)&sync_addr_ctrl,
				  sizeof(sync_addr_ctrl), send_off);

	if (protocol > 0) {
		escaped = htons(ppp_escape_byte(protocol, &offset));
		send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
					  offset ? 1 : 2,
					  send_off);

		escaped = htons(ppp_escape_byte(protocol >> 8, &offset));
		send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
					  offset ? 1 : 2,
					  send_off);
	}

	/* Note that we do not print the first four bytes and the FCS bytes
	 * at the end, so that we do not need to allocate a separate net_buf
	 * just for that purpose.
	 */
	if (LOG_LEVEL >= LOG_LEVEL_DBG) {
		net_pkt_hexdump(pkt, "send ppp");
	}

	while (buf) {
		for (i = 0; i < buf->len; i++) {
			/* Escape illegal bytes */
			escaped = htons(ppp_escape_byte(buf->data[i], &offset));
			send_off = ppp_send_bytes(ppp,
						  (uint8_t *)&escaped + offset,
						  offset ? 1 : 2,
						  send_off);
		}

		buf = buf->frags;
	}

	escaped = htons(ppp_escape_byte(fcs, &offset));
	send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
				  offset ? 1 : 2,
				  send_off);

	escaped = htons(ppp_escape_byte(fcs >> 8, &offset));
	send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
				  offset ? 1 : 2,
				  send_off);

	byte = 0x7e;
	send_off = ppp_send_bytes(ppp, &byte, 1, send_off);

	(void)ppp_send_flush(ppp, send_off);

	return 0;
}

#if !defined(CONFIG_NET_TEST)
static int ppp_consume_ringbuf(struct ppp_driver_context *ppp)
{
	uint8_t *data;
	size_t len, tmp;
	int ret;

	len = ring_buf_get_claim(&ppp->rx_ringbuf, &data,
				 CONFIG_NET_PPP_RINGBUF_SIZE);
	if (len == 0) {
		LOG_DBG("Ringbuf %p is empty!", &ppp->rx_ringbuf);
		return 0;
	}

	/* This will print too much data, enable only if really needed */
	if (0) {
		LOG_HEXDUMP_DBG(data, len, ppp->dev->name);
	}

	tmp = len;

	do {
		if (ppp_input_byte(ppp, *data++) == 0) {
			/* Ignore empty or too short frames */
			if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) {
				ppp_process_msg(ppp);
			}
		}
	} while (--tmp);

	ret = ring_buf_get_finish(&ppp->rx_ringbuf, len);
	if (ret < 0) {
		LOG_DBG("Cannot flush ring buffer (%d)", ret);
	}

	return -EAGAIN;
}

static void ppp_isr_cb_work(struct k_work *work)
{
	struct ppp_driver_context *ppp =
		CONTAINER_OF(work, struct ppp_driver_context, cb_work);
	int ret = -EAGAIN;

	while (ret == -EAGAIN) {
		ret = ppp_consume_ringbuf(ppp);
	}
}
#endif /* !CONFIG_NET_TEST */

static int ppp_driver_init(const struct device *dev)
{
	struct ppp_driver_context *ppp = dev->data;

	LOG_DBG("[%p] dev %p", ppp, dev);

#if !defined(CONFIG_NET_TEST)
	ring_buf_init(&ppp->rx_ringbuf, sizeof(ppp->rx_buf), ppp->rx_buf);
	k_work_init(&ppp->cb_work, ppp_isr_cb_work);

	k_work_queue_start(&ppp->cb_workq, ppp_workq,
			   K_KERNEL_STACK_SIZEOF(ppp_workq),
			   K_PRIO_COOP(PPP_WORKQ_PRIORITY), NULL);
	k_thread_name_set(&ppp->cb_workq.thread, "ppp_workq");
#if defined(CONFIG_NET_PPP_ASYNC_UART)
	k_work_init_delayable(&ppp->uart_recovery_work, uart_recovery);
#endif
#endif
	ppp->pkt = NULL;
	ppp_change_state(ppp, STATE_HDLC_FRAME_START);
#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
	ppp->client_index = 0;
#endif

	return 0;
}

static inline struct net_linkaddr *ppp_get_mac(struct ppp_driver_context *ppp)
{
	ppp->ll_addr.addr = ppp->mac_addr;
	ppp->ll_addr.len = sizeof(ppp->mac_addr);

	return &ppp->ll_addr;
}

static void ppp_iface_init(struct net_if *iface)
{
	struct ppp_driver_context *ppp = net_if_get_device(iface)->data;
	struct net_linkaddr *ll_addr;

	LOG_DBG("[%p] iface %p", ppp, iface);

	net_ppp_init(iface);

	if (ppp->init_done) {
		return;
	}

	ppp->init_done = true;
	ppp->iface = iface;

	/* The MAC address is not really used, but the network interface
	 * expects to find one.
	 */
	ll_addr = ppp_get_mac(ppp);

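	/* Use the MAC address from Kconfig if it parses. Note that the
	 * use_random_mac label below sits inside the else branch; the
	 * goto jumps into that block when parsing fails.
	 */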
	if (CONFIG_PPP_MAC_ADDR[0] != 0) {
		if (net_bytes_from_str(ppp->mac_addr, sizeof(ppp->mac_addr),
				       CONFIG_PPP_MAC_ADDR) < 0) {
			goto use_random_mac;
		}
	} else {
use_random_mac:
		/* 00-00-5E-00-53-xx Documentation RFC 7042 */
		ppp->mac_addr[0] = 0x00;
		ppp->mac_addr[1] = 0x00;
		ppp->mac_addr[2] = 0x5E;
		ppp->mac_addr[3] = 0x00;
		ppp->mac_addr[4] = 0x53;
		ppp->mac_addr[5] = sys_rand8_get();
	}

	net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len,
			     NET_LINK_ETHERNET);

	if (IS_ENABLED(CONFIG_NET_PPP_CAPTURE)) {
		static bool capture_setup_done;

		if (!capture_setup_done) {
			int ret;

			ret = net_capture_cooked_setup(&ppp_capture_ctx->cooked,
						       ARPHRD_PPP,
						       sizeof(ppp->mac_addr),
						       ppp->mac_addr);
			if (ret < 0) {
				LOG_DBG("Cannot setup capture (%d)", ret);
			} else {
				capture_setup_done = true;
			}
		}
	}

	memset(ppp->buf, 0, sizeof(ppp->buf));

#if defined(CONFIG_PPP_NET_IF_NO_AUTO_START)
	/*
	 * If interface autostart is disabled in Kconfig, do not start the
	 * interface automatically; it is only brought up when started
	 * manually.
	 */
	net_if_flag_set(iface, NET_IF_NO_AUTO_START);
#endif
}

#if defined(CONFIG_NET_STATISTICS_PPP)
static struct net_stats_ppp *ppp_get_stats(const struct device *dev)
{
	struct ppp_driver_context *context = dev->data;

	return &context->stats;
}
#endif

#if !defined(CONFIG_NET_TEST) && !defined(CONFIG_NET_PPP_ASYNC_UART)
static void ppp_uart_flush(const struct device *dev)
{
	uint8_t c;

	while (uart_fifo_read(dev, &c, 1) > 0) {
		continue;
	}
}

static void ppp_uart_isr(const struct device *uart, void *user_data)
{
	struct ppp_driver_context *context = user_data;
	int rx = 0, ret;

	/* Get all of the data off the UART as fast as we can */
	while (uart_irq_update(uart) && uart_irq_rx_ready(uart)) {
		rx = uart_fifo_read(uart, context->buf, sizeof(context->buf));
		if (rx <= 0) {
			continue;
		}

		ret = ring_buf_put(&context->rx_ringbuf, context->buf, rx);
		if (ret < rx) {
			LOG_ERR("Rx buffer doesn't have enough space. "
				"Bytes pending: %d, written: %d",
				rx, ret);
			break;
		}

		k_work_submit_to_queue(&context->cb_workq, &context->cb_work);
	}
}
#endif /* !CONFIG_NET_TEST && !CONFIG_NET_PPP_ASYNC_UART */

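/*
 * The UART used by the driver comes from the devicetree "chosen" node.
 * A minimal sketch (the &uart1 label is an assumption; use whatever
 * UART your board provides):
 *
 *	/ {
 *		chosen {
 *			zephyr,ppp-uart = &uart1;
 *		};
 *	};
 */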
static int ppp_start(const struct device *dev)
{
	struct ppp_driver_context *context = dev->data;

	/* Init the PPP UART. This should only be done once. */
#if !defined(CONFIG_NET_TEST)
	if (atomic_cas(&context->modem_init_done, false, true)) {
		context->dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_ppp_uart));

		LOG_DBG("Initializing PPP to use %s", context->dev->name);

		if (!device_is_ready(context->dev)) {
			LOG_ERR("Device %s is not ready", context->dev->name);
			return -ENODEV;
		}
#if defined(CONFIG_NET_PPP_ASYNC_UART)
		k_sem_give(&uarte_tx_finished);
		ppp_async_uart_rx_enable(context);
#else
		uart_irq_rx_disable(context->dev);
		uart_irq_tx_disable(context->dev);
		ppp_uart_flush(context->dev);
		uart_irq_callback_user_data_set(context->dev, ppp_uart_isr,
						context);
		uart_irq_rx_enable(context->dev);
#endif
	}
#endif /* !CONFIG_NET_TEST */

	net_if_carrier_on(context->iface);
	return 0;
}

static int ppp_stop(const struct device *dev)
{
	struct ppp_driver_context *context = dev->data;

	net_if_carrier_off(context->iface);
#if defined(CONFIG_NET_PPP_ASYNC_UART)
	uart_rx_disable(context->dev);
#endif
	context->modem_init_done = false;
	return 0;
}

static const struct ppp_api ppp_if_api = {
	.iface_api.init = ppp_iface_init,

	.send = ppp_send,
	.start = ppp_start,
	.stop = ppp_stop,
#if defined(CONFIG_NET_STATISTICS_PPP)
	.get_stats = ppp_get_stats,
#endif
};

NET_DEVICE_INIT(ppp, CONFIG_NET_PPP_DRV_NAME, ppp_driver_init,
		NULL, &ppp_driver_context_data, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ppp_if_api,
		PPP_L2, NET_L2_GET_CTX_TYPE(PPP_L2), PPP_MTU);
1158