1 /*
2 * Copyright (c) 2019 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 *
10 * PPP driver using uart_pipe. This is meant for network connectivity between
11 * two network end points.
12 */
13
14 #define LOG_LEVEL CONFIG_NET_PPP_LOG_LEVEL
15 #include <logging/log.h>
16 LOG_MODULE_REGISTER(net_ppp, LOG_LEVEL);
17
18 #include <stdio.h>
19
20 #include <kernel.h>
21
22 #include <stdbool.h>
23 #include <errno.h>
24 #include <stddef.h>
25 #include <net/ppp.h>
26 #include <net/buf.h>
27 #include <net/net_pkt.h>
28 #include <net/net_if.h>
29 #include <net/net_core.h>
30 #include <sys/ring_buffer.h>
31 #include <sys/crc.h>
32 #include <drivers/uart.h>
33 #include <drivers/console/uart_mux.h>
34 #include <random/rand32.h>
35
36 #include "../../subsys/net/ip/net_stats.h"
37 #include "../../subsys/net/ip/net_private.h"
38
/* Size of the temporary UART RX/TX staging buffers */
#define UART_BUF_LEN CONFIG_NET_PPP_UART_BUF_LEN

/* HDLC de-framing states, driven by ppp_input_byte() */
enum ppp_driver_state {
	STATE_HDLC_FRAME_START,   /* hunting for the 0x7e flag byte */
	STATE_HDLC_FRAME_ADDRESS, /* expecting the 0xff address byte */
	STATE_HDLC_FRAME_DATA,    /* collecting payload until closing 0x7e */
};

#define PPP_WORKQ_PRIORITY CONFIG_NET_PPP_RX_PRIORITY
#define PPP_WORKQ_STACK_SIZE CONFIG_NET_PPP_RX_STACK_SIZE

/* Stack for the RX work queue thread */
K_KERNEL_STACK_DEFINE(ppp_workq, PPP_WORKQ_STACK_SIZE);
51
struct ppp_driver_context {
	/* UART (or uart_mux) device used for the PPP link */
	const struct device *dev;
	/* Network interface this driver is bound to */
	struct net_if *iface;

	/* This net_pkt contains pkt that is being read */
	struct net_pkt *pkt;

	/* How much free space we have in the net_pkt */
	size_t available;

	/* ppp data is read into this buf */
	uint8_t buf[UART_BUF_LEN];

	/* ppp buf use when sending data */
	uint8_t send_buf[UART_BUF_LEN];

	/* Pseudo link-layer address, set up in ppp_iface_init() */
	uint8_t mac_addr[6];
	struct net_linkaddr ll_addr;

	/* Flag that tells whether this instance is initialized or not */
	atomic_t modem_init_done;

	/* Incoming data is routed via ring buffer */
	struct ring_buf rx_ringbuf;
	uint8_t rx_buf[CONFIG_NET_PPP_RINGBUF_SIZE];

	/* ISR function callback worker */
	struct k_work cb_work;
	struct k_work_q cb_workq;

#if defined(CONFIG_NET_STATISTICS_PPP)
	struct net_stats_ppp stats;
#endif
	/* Current HDLC de-framing state */
	enum ppp_driver_state state;

#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
	/* correctly received CLIENT bytes */
	uint8_t client_index;
#endif

	/* One-time interface setup completed, see ppp_iface_init() */
	uint8_t init_done : 1;
	/* Next RX byte is escaped (RFC 1662 byte stuffing) */
	uint8_t next_escaped : 1;
};

/* Single driver instance */
static struct ppp_driver_context ppp_driver_context_data;
97
/* Append one received byte to the packet currently being assembled.
 *
 * Allocates ppp->pkt on first use and grows it with additional net_buf
 * fragments as needed. Returns 0 on success, -ENOMEM on allocation
 * failure (in which case the partial packet is dropped and ppp->pkt
 * is cleared).
 */
static int ppp_save_byte(struct ppp_driver_context *ppp, uint8_t byte)
{
	int ret;

	if (!ppp->pkt) {
		ppp->pkt = net_pkt_rx_alloc_with_buffer(
			ppp->iface,
			CONFIG_NET_BUF_DATA_SIZE,
			AF_UNSPEC, 0, K_NO_WAIT);
		if (!ppp->pkt) {
			LOG_ERR("[%p] cannot allocate pkt", ppp);
			return -ENOMEM;
		}

		net_pkt_cursor_init(ppp->pkt);

		ppp->available = net_pkt_available_buffer(ppp->pkt);
	}

	/* Extra debugging can be enabled separately if really
	 * needed. Normally it would just print too much data.
	 */
	if (0) {
		LOG_DBG("Saving byte %02x", byte);
	}

	/* This is not very intuitive but we must allocate new buffer
	 * before we write a byte to last available cursor position.
	 */
	if (ppp->available == 1) {
		ret = net_pkt_alloc_buffer(ppp->pkt,
					   CONFIG_NET_BUF_DATA_SIZE,
					   AF_UNSPEC, K_NO_WAIT);
		if (ret < 0) {
			LOG_ERR("[%p] cannot allocate new data buffer", ppp);
			goto out_of_mem;
		}

		ppp->available = net_pkt_available_buffer(ppp->pkt);
	}

	if (ppp->available) {
		ret = net_pkt_write_u8(ppp->pkt, byte);
		if (ret < 0) {
			LOG_ERR("[%p] Cannot write to pkt %p (%d)",
				ppp, ppp->pkt, ret);
			goto out_of_mem;
		}

		ppp->available--;
	}

	return 0;

out_of_mem:
	/* Drop the partially assembled packet */
	net_pkt_unref(ppp->pkt);
	ppp->pkt = NULL;
	return -ENOMEM;
}
157
ppp_driver_state_str(enum ppp_driver_state state)158 static const char *ppp_driver_state_str(enum ppp_driver_state state)
159 {
160 #if (CONFIG_NET_PPP_LOG_LEVEL >= LOG_LEVEL_DBG)
161 switch (state) {
162 case STATE_HDLC_FRAME_START:
163 return "START";
164 case STATE_HDLC_FRAME_ADDRESS:
165 return "ADDRESS";
166 case STATE_HDLC_FRAME_DATA:
167 return "DATA";
168 }
169 #else
170 ARG_UNUSED(state);
171 #endif
172
173 return "";
174 }
175
ppp_change_state(struct ppp_driver_context * ctx,enum ppp_driver_state new_state)176 static void ppp_change_state(struct ppp_driver_context *ctx,
177 enum ppp_driver_state new_state)
178 {
179 NET_ASSERT(ctx);
180
181 if (ctx->state == new_state) {
182 return;
183 }
184
185 NET_ASSERT(new_state >= STATE_HDLC_FRAME_START &&
186 new_state <= STATE_HDLC_FRAME_DATA);
187
188 NET_DBG("[%p] state %s (%d) => %s (%d)",
189 ctx, ppp_driver_state_str(ctx->state), ctx->state,
190 ppp_driver_state_str(new_state), new_state);
191
192 ctx->state = new_state;
193 }
194
ppp_send_flush(struct ppp_driver_context * ppp,int off)195 static int ppp_send_flush(struct ppp_driver_context *ppp, int off)
196 {
197 if (IS_ENABLED(CONFIG_NET_TEST)) {
198 return 0;
199 }
200 uint8_t *buf = ppp->send_buf;
201
202 /* If we're using gsm_mux, We don't want to use poll_out because sending
203 * one byte at a time causes each byte to get wrapped in muxing headers.
204 * But we can safely call uart_fifo_fill outside of ISR context when
205 * muxing because uart_mux implements it in software.
206 */
207 if (IS_ENABLED(CONFIG_GSM_MUX)) {
208 (void)uart_fifo_fill(ppp->dev, buf, off);
209 } else {
210 while (off--) {
211 uart_poll_out(ppp->dev, *buf++);
212 }
213 }
214
215 return 0;
216 }
217
ppp_send_bytes(struct ppp_driver_context * ppp,const uint8_t * data,int len,int off)218 static int ppp_send_bytes(struct ppp_driver_context *ppp,
219 const uint8_t *data, int len, int off)
220 {
221 int i;
222
223 for (i = 0; i < len; i++) {
224 ppp->send_buf[off++] = data[i];
225
226 if (off >= sizeof(ppp->send_buf)) {
227 off = ppp_send_flush(ppp, off);
228 }
229 }
230
231 return off;
232 }
233
#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)

#define CLIENT "CLIENT"
#define CLIENTSERVER "CLIENTSERVER"

/* Match incoming bytes against the literal string "CLIENT", one byte per
 * call, keeping progress in ppp->client_index. When the full string has
 * been received, reply with "CLIENTSERVER" and reset the matcher.
 */
static void ppp_handle_client(struct ppp_driver_context *ppp, uint8_t byte)
{
	static const char *client = CLIENT;
	static const char *clientserver = CLIENTSERVER;
	int offset;

	/* Defensive reset if the index ever runs past the pattern */
	if (ppp->client_index >= (sizeof(CLIENT) - 1)) {
		ppp->client_index = 0;
	}

	if (byte != client[ppp->client_index]) {
		/* Mismatch: restart, then check whether this byte begins
		 * a new "CLIENT" sequence.
		 */
		ppp->client_index = 0;
		if (byte != client[ppp->client_index]) {
			return;
		}
	}

	++ppp->client_index;
	if (ppp->client_index >= (sizeof(CLIENT) - 1)) {
		LOG_DBG("Received complete CLIENT string");
		offset = ppp_send_bytes(ppp, clientserver,
					sizeof(CLIENTSERVER) - 1, 0);
		(void)ppp_send_flush(ppp, offset);
		ppp->client_index = 0;
	}

}
#endif
267
/* Feed one received byte into the HDLC de-framing state machine.
 *
 * Returns 0 when the byte terminated a complete frame (closing 0x7e flag
 * seen in the DATA state), otherwise -EAGAIN while a frame is in progress
 * or the byte was discarded. On allocation failure the frame is dropped
 * and the parser resynchronizes from START.
 */
static int ppp_input_byte(struct ppp_driver_context *ppp, uint8_t byte)
{
	int ret = -EAGAIN;

	switch (ppp->state) {
	case STATE_HDLC_FRAME_START:
		/* Synchronizing the flow with HDLC flag field */
		if (byte == 0x7e) {
			/* Note that we do not save the sync flag */
			LOG_DBG("Sync byte (0x%02x) start", byte);
			ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS);
#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
		} else {
			ppp_handle_client(ppp, byte);
#endif
		}

		break;

	case STATE_HDLC_FRAME_ADDRESS:
		if (byte != 0xff) {
			/* Check if we need to sync again */
			if (byte == 0x7e) {
				/* Just skip to the start of the pkt byte */
				return -EAGAIN;
			}

			LOG_DBG("Invalid (0x%02x) byte, expecting Address",
				byte);

			/* If address is != 0xff, then ignore this
			 * frame. RFC 1662 ch 3.1
			 */
			ppp_change_state(ppp, STATE_HDLC_FRAME_START);
		} else {
			LOG_DBG("Address byte (0x%02x) start", byte);

			ppp_change_state(ppp, STATE_HDLC_FRAME_DATA);

			/* Save the address field so that we can calculate
			 * the FCS. The address field will not be passed
			 * to upper stack.
			 */
			ret = ppp_save_byte(ppp, byte);
			if (ret < 0) {
				ppp_change_state(ppp, STATE_HDLC_FRAME_START);
			}

			ret = -EAGAIN;
		}

		break;

	case STATE_HDLC_FRAME_DATA:
		/* If the next frame starts, then send this one
		 * up in the network stack.
		 */
		if (byte == 0x7e) {
			LOG_DBG("End of pkt (0x%02x)", byte);
			ppp_change_state(ppp, STATE_HDLC_FRAME_ADDRESS);
			ret = 0;
		} else {
			if (byte == 0x7d) {
				/* RFC 1662, ch. 4.2: next byte is escaped */
				ppp->next_escaped = true;
				break;
			}

			if (ppp->next_escaped) {
				/* RFC 1662, ch. 4.2: undo the escaping */
				byte ^= 0x20;
				ppp->next_escaped = false;
			}

			ret = ppp_save_byte(ppp, byte);
			if (ret < 0) {
				ppp_change_state(ppp, STATE_HDLC_FRAME_START);
			}

			ret = -EAGAIN;
		}

		break;

	default:
		LOG_DBG("[%p] Invalid state %d", ppp, ppp->state);
		break;
	}

	return ret;
}
359
ppp_check_fcs(struct ppp_driver_context * ppp)360 static bool ppp_check_fcs(struct ppp_driver_context *ppp)
361 {
362 struct net_buf *buf;
363 uint16_t crc;
364
365 buf = ppp->pkt->buffer;
366 if (!buf) {
367 return false;
368 }
369
370 crc = crc16_ccitt(0xffff, buf->data, buf->len);
371
372 buf = buf->frags;
373
374 while (buf) {
375 crc = crc16_ccitt(crc, buf->data, buf->len);
376 buf = buf->frags;
377 }
378
379 if (crc != 0xf0b8) {
380 LOG_DBG("Invalid FCS (0x%x)", crc);
381 #if defined(CONFIG_NET_STATISTICS_PPP)
382 ppp->stats.chkerr++;
383 #endif
384 return false;
385 }
386
387 return true;
388 }
389
/* Validate a completely received frame and hand it to the network stack.
 *
 * Optionally verifies the FCS, strips the HDLC Address/Control bytes and
 * the trailing FCS, then passes the packet to net_recv_data(). The packet
 * is dropped on FCS error, on an unexpected (e.g. compressed)
 * Address/Control field, or if the stack refuses it. Ownership of
 * ppp->pkt is always released here: the field is cleared on exit.
 */
static void ppp_process_msg(struct ppp_driver_context *ppp)
{
	if (LOG_LEVEL >= LOG_LEVEL_DBG) {
		net_pkt_hexdump(ppp->pkt, "recv ppp");
	}

	if (IS_ENABLED(CONFIG_NET_PPP_VERIFY_FCS) && !ppp_check_fcs(ppp)) {
#if defined(CONFIG_NET_STATISTICS_PPP)
		ppp->stats.drop++;
		ppp->stats.pkts.rx++;
#endif
		net_pkt_unref(ppp->pkt);
	} else {
		/* Remove the Address (0xff), Control (0x03) and
		 * FCS fields (16-bit) as the PPP L2 layer does not need
		 * those bytes.
		 */
		uint16_t addr_and_ctrl = net_buf_pull_be16(ppp->pkt->buffer);

		/* Currently we do not support compressed Address and Control
		 * fields so they must always be present.
		 */
		if (addr_and_ctrl != (0xff << 8 | 0x03)) {
#if defined(CONFIG_NET_STATISTICS_PPP)
			ppp->stats.drop++;
			ppp->stats.pkts.rx++;
#endif
			net_pkt_unref(ppp->pkt);
		} else {
			/* Remove FCS bytes (2) */
			net_pkt_remove_tail(ppp->pkt, 2);

			/* Make sure we now start reading from PPP header in
			 * PPP L2 recv()
			 */
			net_pkt_cursor_init(ppp->pkt);
			net_pkt_set_overwrite(ppp->pkt, true);

			if (net_recv_data(ppp->iface, ppp->pkt) < 0) {
				net_pkt_unref(ppp->pkt);
			}
		}
	}

	ppp->pkt = NULL;
}
436
437 #if defined(CONFIG_NET_TEST)
/* Test-mode RX callback: run buffered bytes through the HDLC parser.
 *
 * Processes up to *off bytes from buf, stopping early once a complete
 * frame has been dispatched. Unconsumed bytes are moved to the front of
 * buf and *off is updated to the count that remains.
 */
static uint8_t *ppp_recv_cb(uint8_t *buf, size_t *off)
{
	/* buf is the embedded ppp->buf, so recover the enclosing context */
	struct ppp_driver_context *ppp =
		CONTAINER_OF(buf, struct ppp_driver_context, buf);
	size_t i, len = *off;

	for (i = 0; i < *off; i++) {
		if (0) {
			/* Extra debugging can be enabled separately if really
			 * needed. Normally it would just print too much data.
			 */
			LOG_DBG("[%zd] %02x", i, buf[i]);
		}

		if (ppp_input_byte(ppp, buf[i]) == 0) {
			/* Ignore empty or too short frames */
			if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) {
				ppp_process_msg(ppp);
				break;
			}
		}
	}

	if (i == *off) {
		/* Everything consumed */
		*off = 0;
	} else {
		/* Shift leftover bytes to the start of the buffer */
		*off = len - i - 1;

		memmove(&buf[0], &buf[i + 1], *off);
	}

	return buf;
}
471
/* Test-mode entry point: feed a raw PPP byte stream into the driver.
 *
 * Copies the data in UART_BUF_LEN sized chunks into ppp->buf and runs it
 * through ppp_recv_cb() until everything has been consumed.
 */
void ppp_driver_feed_data(uint8_t *data, int data_len)
{
	struct ppp_driver_context *ppp = &ppp_driver_context_data;
	size_t recv_off = 0;

	/* We are expecting that the tests are feeding data in large
	 * chunks so we can reset the uart buffer here.
	 */
	memset(ppp->buf, 0, UART_BUF_LEN);

	ppp_change_state(ppp, STATE_HDLC_FRAME_START);

	while (data_len > 0) {
		int data_to_copy = MIN(data_len, UART_BUF_LEN);
		int remaining;

		LOG_DBG("Feeding %d bytes", data_to_copy);

		memcpy(ppp->buf, data, data_to_copy);

		recv_off = data_to_copy;

		(void)ppp_recv_cb(ppp->buf, &recv_off);

		/* recv_off now holds the number of unconsumed bytes */
		remaining = data_to_copy - recv_off;

		LOG_DBG("We copied %d bytes", remaining);

		data_len -= remaining;
		data += remaining;
	}
}
504 #endif
505
calc_fcs(struct net_pkt * pkt,uint16_t * fcs,uint16_t protocol)506 static bool calc_fcs(struct net_pkt *pkt, uint16_t *fcs, uint16_t protocol)
507 {
508 struct net_buf *buf;
509 uint16_t crc;
510 uint16_t c;
511
512 buf = pkt->buffer;
513 if (!buf) {
514 return false;
515 }
516
517 /* HDLC Address and Control fields */
518 c = sys_cpu_to_be16(0xff << 8 | 0x03);
519
520 crc = crc16_ccitt(0xffff, (const uint8_t *)&c, sizeof(c));
521
522 if (protocol > 0) {
523 crc = crc16_ccitt(crc, (const uint8_t *)&protocol,
524 sizeof(protocol));
525 }
526
527 while (buf) {
528 crc = crc16_ccitt(crc, buf->data, buf->len);
529 buf = buf->frags;
530 }
531
532 crc ^= 0xffff;
533 *fcs = crc;
534
535 return true;
536 }
537
/* Byte-stuff one TX byte per RFC 1662 ch. 4.2.
 *
 * Flag (0x7e), escape (0x7d) and control characters (< 0x20) are encoded
 * as the two-byte sequence 0x7d, byte^0x20; *offset is set to 0 so the
 * caller emits both bytes. Other bytes pass through unchanged with
 * *offset set to 1 (caller emits only the low byte).
 */
static uint16_t ppp_escape_byte(uint8_t byte, int *offset)
{
	bool needs_escape = (byte == 0x7e) || (byte == 0x7d) || (byte < 0x20);

	if (!needs_escape) {
		*offset = 1;
		return byte;
	}

	*offset = 0;
	return (uint16_t)((0x7d << 8) | (byte ^ 0x20));
}
548
/* PPP L2 send hook: HDLC-frame the packet and write it to the UART.
 *
 * Emits the opening 0x7e flag, the Address/Control pair (0xff 0x03, with
 * the Control byte pre-escaped as 0x7d 0x23), the 16-bit protocol id for
 * plain IP/IPv6 packets, the byte-stuffed payload, the escaped FCS and
 * the closing 0x7e flag. Returns 0 on success or a negative errno.
 */
static int ppp_send(const struct device *dev, struct net_pkt *pkt)
{
	struct ppp_driver_context *ppp = dev->data;
	struct net_buf *buf = pkt->buffer;
	uint16_t protocol = 0;
	int send_off = 0;
	uint32_t sync_addr_ctrl;
	uint16_t fcs, escaped;
	uint8_t byte;
	int i, offset;

#if defined(CONFIG_NET_TEST)
	/* In test builds nothing is written to hardware */
	return 0;
#endif

	ARG_UNUSED(dev);

	if (!buf) {
		/* No data? */
		return -ENODATA;
	}

	/* If the packet is a normal network packet, we must add the protocol
	 * value here.
	 */
	if (!net_pkt_is_ppp(pkt)) {
		if (net_pkt_family(pkt) == AF_INET) {
			protocol = htons(PPP_IP);
		} else if (net_pkt_family(pkt) == AF_INET6) {
			protocol = htons(PPP_IPV6);
		} else if (IS_ENABLED(CONFIG_NET_SOCKETS_PACKET) &&
			   net_pkt_family(pkt) == AF_PACKET) {
			/* Peek the IP version nibble to pick the protocol */
			char type = (NET_IPV6_HDR(pkt)->vtc & 0xf0);

			switch (type) {
			case 0x60:
				protocol = htons(PPP_IPV6);
				break;
			case 0x40:
				protocol = htons(PPP_IP);
				break;
			default:
				return -EPROTONOSUPPORT;
			}
		} else {
			return -EPROTONOSUPPORT;
		}
	}

	if (!calc_fcs(pkt, &fcs, protocol)) {
		return -ENOMEM;
	}

	/* Sync, Address & Control fields; Control 0x03 is sent pre-escaped
	 * as 0x7d 0x23 (RFC 1662 byte stuffing).
	 */
	sync_addr_ctrl = sys_cpu_to_be32(0x7e << 24 | 0xff << 16 |
					 0x7d << 8 | 0x23);
	send_off = ppp_send_bytes(ppp, (const uint8_t *)&sync_addr_ctrl,
				  sizeof(sync_addr_ctrl), send_off);

	if (protocol > 0) {
		/* protocol holds the id in network byte order; the low byte
		 * of the variable is emitted first, which matches the memory
		 * order used by calc_fcs() on little-endian CPUs.
		 * NOTE(review): verify this ordering on big-endian targets.
		 */
		escaped = htons(ppp_escape_byte(protocol, &offset));
		send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
					  offset ? 1 : 2,
					  send_off);

		escaped = htons(ppp_escape_byte(protocol >> 8, &offset));
		send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
					  offset ? 1 : 2,
					  send_off);
	}

	/* Note that we do not print the first four bytes and FCS bytes at the
	 * end so that we do not need to allocate separate net_buf just for
	 * that purpose.
	 */
	if (LOG_LEVEL >= LOG_LEVEL_DBG) {
		net_pkt_hexdump(pkt, "send ppp");
	}

	while (buf) {
		for (i = 0; i < buf->len; i++) {
			/* Escape illegal bytes */
			escaped = htons(ppp_escape_byte(buf->data[i], &offset));
			send_off = ppp_send_bytes(ppp,
						  (uint8_t *)&escaped + offset,
						  offset ? 1 : 2,
						  send_off);
		}

		buf = buf->frags;
	}

	/* FCS is emitted low byte first */
	escaped = htons(ppp_escape_byte(fcs, &offset));
	send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
				  offset ? 1 : 2,
				  send_off);

	escaped = htons(ppp_escape_byte(fcs >> 8, &offset));
	send_off = ppp_send_bytes(ppp, (uint8_t *)&escaped + offset,
				  offset ? 1 : 2,
				  send_off);

	/* Closing flag */
	byte = 0x7e;
	send_off = ppp_send_bytes(ppp, &byte, 1, send_off);

	(void)ppp_send_flush(ppp, send_off);

	return 0;
}
658
659 #if !defined(CONFIG_NET_TEST)
/* Drain one claimed region of the RX ring buffer into the HDLC parser.
 *
 * Returns 0 when the ring buffer is empty, otherwise -EAGAIN so the
 * caller keeps calling until everything buffered has been consumed.
 */
static int ppp_consume_ringbuf(struct ppp_driver_context *ppp)
{
	uint8_t *data;
	size_t len, tmp;
	int ret;

	/* Claim a contiguous chunk; may be less than the total pending data */
	len = ring_buf_get_claim(&ppp->rx_ringbuf, &data,
				 CONFIG_NET_PPP_RINGBUF_SIZE);
	if (len == 0) {
		LOG_DBG("Ringbuf %p is empty!", &ppp->rx_ringbuf);
		return 0;
	}

	/* This will print too much data, enable only if really needed */
	if (0) {
		LOG_HEXDUMP_DBG(data, len, ppp->dev->name);
	}

	tmp = len;

	do {
		if (ppp_input_byte(ppp, *data++) == 0) {
			/* Ignore empty or too short frames */
			if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) {
				ppp_process_msg(ppp);
			}
		}
	} while (--tmp);

	ret = ring_buf_get_finish(&ppp->rx_ringbuf, len);
	if (ret < 0) {
		LOG_DBG("Cannot flush ring buffer (%d)", ret);
	}

	return -EAGAIN;
}
696
ppp_isr_cb_work(struct k_work * work)697 static void ppp_isr_cb_work(struct k_work *work)
698 {
699 struct ppp_driver_context *ppp =
700 CONTAINER_OF(work, struct ppp_driver_context, cb_work);
701 int ret = -EAGAIN;
702
703 while (ret == -EAGAIN) {
704 ret = ppp_consume_ringbuf(ppp);
705 }
706 }
707 #endif /* !CONFIG_NET_TEST */
708
/* Device init hook: set up the RX ring buffer and work queue (non-test
 * builds) and reset the HDLC parser state. Always returns 0.
 */
static int ppp_driver_init(const struct device *dev)
{
	struct ppp_driver_context *ppp = dev->data;

	LOG_DBG("[%p] dev %p", ppp, dev);

#if !defined(CONFIG_NET_TEST)
	ring_buf_init(&ppp->rx_ringbuf, sizeof(ppp->rx_buf), ppp->rx_buf);
	k_work_init(&ppp->cb_work, ppp_isr_cb_work);

	/* Dedicated cooperative-priority workqueue that drains the RX
	 * ring buffer outside of ISR context.
	 */
	k_work_queue_start(&ppp->cb_workq, ppp_workq,
			   K_KERNEL_STACK_SIZEOF(ppp_workq),
			   K_PRIO_COOP(PPP_WORKQ_PRIORITY), NULL);
	k_thread_name_set(&ppp->cb_workq.thread, "ppp_workq");
#endif

	ppp->pkt = NULL;
	ppp_change_state(ppp, STATE_HDLC_FRAME_START);
#if defined(CONFIG_PPP_CLIENT_CLIENTSERVER)
	ppp->client_index = 0;
#endif

	return 0;
}
733
ppp_get_mac(struct ppp_driver_context * ppp)734 static inline struct net_linkaddr *ppp_get_mac(struct ppp_driver_context *ppp)
735 {
736 ppp->ll_addr.addr = ppp->mac_addr;
737 ppp->ll_addr.len = sizeof(ppp->mac_addr);
738
739 return &ppp->ll_addr;
740 }
741
ppp_iface_init(struct net_if * iface)742 static void ppp_iface_init(struct net_if *iface)
743 {
744 struct ppp_driver_context *ppp = net_if_get_device(iface)->data;
745 struct net_linkaddr *ll_addr;
746
747 LOG_DBG("[%p] iface %p", ppp, iface);
748
749 net_ppp_init(iface);
750
751 if (ppp->init_done) {
752 return;
753 }
754
755 ppp->init_done = true;
756 ppp->iface = iface;
757
758 /* The mac address is not really used but network interface expects
759 * to find one.
760 */
761 ll_addr = ppp_get_mac(ppp);
762
763 if (CONFIG_PPP_MAC_ADDR[0] != 0) {
764 if (net_bytes_from_str(ppp->mac_addr, sizeof(ppp->mac_addr),
765 CONFIG_PPP_MAC_ADDR) < 0) {
766 goto use_random_mac;
767 }
768 } else {
769 use_random_mac:
770 /* 00-00-5E-00-53-xx Documentation RFC 7042 */
771 ppp->mac_addr[0] = 0x00;
772 ppp->mac_addr[1] = 0x00;
773 ppp->mac_addr[2] = 0x5E;
774 ppp->mac_addr[3] = 0x00;
775 ppp->mac_addr[4] = 0x53;
776 ppp->mac_addr[5] = sys_rand32_get();
777 }
778
779 net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len,
780 NET_LINK_ETHERNET);
781
782 memset(ppp->buf, 0, sizeof(ppp->buf));
783
784 /* If we have a GSM modem with PPP support, then do not start the
785 * interface automatically but only after the modem is ready.
786 */
787 if (IS_ENABLED(CONFIG_MODEM_GSM_PPP)) {
788 net_if_flag_set(iface, NET_IF_NO_AUTO_START);
789 }
790 }
791
792 #if defined(CONFIG_NET_STATISTICS_PPP)
ppp_get_stats(const struct device * dev)793 static struct net_stats_ppp *ppp_get_stats(const struct device *dev)
794 {
795 struct ppp_driver_context *context = dev->data;
796
797 return &context->stats;
798 }
799 #endif
800
801 #if !defined(CONFIG_NET_TEST)
/* Discard any stale bytes pending in the UART RX FIFO. */
static void ppp_uart_flush(const struct device *dev)
{
	uint8_t byte;

	while (uart_fifo_read(dev, &byte, 1) > 0) {
		/* drop the byte */
	}
}
810
/* UART IRQ handler: copy received bytes into the RX ring buffer and kick
 * the workqueue. Runs in ISR context, so no parsing happens here.
 */
static void ppp_uart_isr(const struct device *uart, void *user_data)
{
	struct ppp_driver_context *context = user_data;
	int rx = 0, ret;

	/* get all of the data off UART as fast as we can */
	while (uart_irq_update(uart) && uart_irq_rx_ready(uart)) {
		rx = uart_fifo_read(uart, context->buf, sizeof(context->buf));
		if (rx <= 0) {
			continue;
		}

		ret = ring_buf_put(&context->rx_ringbuf, context->buf, rx);
		if (ret < rx) {
			/* Overflow: bytes that did not fit are lost */
			LOG_ERR("Rx buffer doesn't have enough space. "
				"Bytes pending: %d, written: %d",
				rx, ret);
			break;
		}

		k_work_submit_to_queue(&context->cb_workq, &context->cb_work);
	}
}
834 #endif /* !CONFIG_NET_TEST */
835
/* Bring the PPP connection up.
 *
 * On the first call (guarded by the modem_init_done atomic) this resolves
 * and opens the underlying UART: the GSM mux DLCI when CONFIG_GSM_MUX is
 * enabled, else the GSM modem bus UART, else CONFIG_NET_PPP_UART_NAME.
 * RX interrupts are then enabled and carrier is reported to the PPP L2.
 * Returns 0 on success or a negative errno.
 */
static int ppp_start(const struct device *dev)
{
	struct ppp_driver_context *context = dev->data;

	/* Init the PPP UART only once. This should only be done after
	 * the GSM muxing is setup and enabled. GSM modem will call this
	 * after everything is ready to be connected.
	 */
#if !defined(CONFIG_NET_TEST)
	if (atomic_cas(&context->modem_init_done, false, true)) {
		const char *dev_name = NULL;

		/* Now try to figure out what device to open. If GSM muxing
		 * is enabled, then use it. If not, then check if modem
		 * configuration is enabled, and use that. If none are enabled,
		 * then use our own config.
		 */
#if IS_ENABLED(CONFIG_GSM_MUX)
		const struct device *mux;

		mux = uart_mux_find(CONFIG_GSM_MUX_DLCI_PPP);
		if (mux == NULL) {
			LOG_ERR("Cannot find GSM mux dev for DLCI %d",
				CONFIG_GSM_MUX_DLCI_PPP);
			return -ENOENT;
		}

		dev_name = mux->name;
#elif IS_ENABLED(CONFIG_MODEM_GSM_PPP)
		dev_name = DT_BUS_LABEL(DT_INST(0, zephyr_gsm_ppp));
#else
		dev_name = CONFIG_NET_PPP_UART_NAME;
#endif
		if (dev_name == NULL || dev_name[0] == '\0') {
			LOG_ERR("UART configuration is wrong!");
			return -EINVAL;
		}

		LOG_INF("Initializing PPP to use %s", dev_name);

		context->dev = device_get_binding(dev_name);
		if (!context->dev) {
			LOG_ERR("Cannot find dev %s", dev_name);
			return -ENODEV;
		}

		/* Quiesce the UART, drop stale FIFO bytes, then hook up
		 * our ISR before enabling RX interrupts.
		 */
		uart_irq_rx_disable(context->dev);
		uart_irq_tx_disable(context->dev);
		ppp_uart_flush(context->dev);
		uart_irq_callback_user_data_set(context->dev, ppp_uart_isr,
						context);
		uart_irq_rx_enable(context->dev);
	}
#endif /* !CONFIG_NET_TEST */

	net_ppp_carrier_on(context->iface);

	return 0;
}
895
ppp_stop(const struct device * dev)896 static int ppp_stop(const struct device *dev)
897 {
898 struct ppp_driver_context *context = dev->data;
899
900 net_ppp_carrier_off(context->iface);
901 context->modem_init_done = false;
902 return 0;
903 }
904
/* PPP L2 driver API hooks */
static const struct ppp_api ppp_if_api = {
	.iface_api.init = ppp_iface_init,

	.send = ppp_send,
	.start = ppp_start,
	.stop = ppp_stop,
#if defined(CONFIG_NET_STATISTICS_PPP)
	.get_stats = ppp_get_stats,
#endif
};

/* Register the PPP network device and bind it to the PPP L2 */
NET_DEVICE_INIT(ppp, CONFIG_NET_PPP_DRV_NAME, ppp_driver_init,
		NULL, &ppp_driver_context_data, NULL,
		CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ppp_if_api,
		PPP_L2, NET_L2_GET_CTX_TYPE(PPP_L2), PPP_MTU);
920