Lines Matching +full:crc +full:- +full:enabled
4 * SPDX-License-Identifier: Apache-2.0
31 #include <zephyr/sys/crc.h>
142 switch (evt->type) { in uart_callback()
144 LOG_DBG("UART_TX_DONE: sent %zu bytes", evt->data.tx.len); in uart_callback()
161 / MSEC_PER_SEC > evt->data.tx.len * 2) { in uart_callback()
167 " (%d ms) or the UART baud rate (%u).", evt->data.tx.len, in uart_callback()
174 len = evt->data.rx.len; in uart_callback()
175 p = evt->data.rx.buf + evt->data.rx.offset; in uart_callback()
179 ret = ring_buf_put(&context->rx_ringbuf, p, len); in uart_callback()
180 if (ret < evt->data.rx.len) { in uart_callback()
184 evt->data.rx.len, ret); in uart_callback()
188 * rx off for now and re-enabling that later. in uart_callback()
196 space_left = ring_buf_space_get(&context->rx_ringbuf); in uart_callback()
197 if (!rx_retry_pending && space_left < (sizeof(context->rx_buf) / 8)) { in uart_callback()
209 k_work_submit_to_queue(&context->cb_workq, &context->cb_work); in uart_callback()
217 err = uart_rx_buf_rsp(dev, next_buf, sizeof(context->buf)); in uart_callback()
227 next_buf = evt->data.rx_buf.buf; in uart_callback()
232 LOG_DBG("UART_RX_DISABLED - re-enabling in a while"); in uart_callback()
235 k_work_schedule(&context->uart_recovery_work, in uart_callback()
243 LOG_DBG("UART_RX_STOPPED: stop reason %d", evt->data.rx_stop.reason); in uart_callback()
245 if (evt->data.rx_stop.reason != 0) { in uart_callback()
256 next_buf = context->buf2; in ppp_async_uart_rx_enable()
257 err = uart_callback_set(context->dev, uart_callback, (void *)context); in ppp_async_uart_rx_enable()
262 err = uart_rx_enable(context->dev, context->buf, sizeof(context->buf), in ppp_async_uart_rx_enable()
267 LOG_DBG("RX enabled"); in ppp_async_uart_rx_enable()
280 ret = ring_buf_space_get(&ppp->rx_ringbuf); in uart_recovery()
281 if (ret >= (sizeof(ppp->rx_buf) / 2)) { in uart_recovery()
290 LOG_ERR("Rx buffer still doesn't have enough room %d to be re-enabled", ret); in uart_recovery()
291 k_work_schedule(&ppp->uart_recovery_work, in uart_recovery()
301 if (!ppp->pkt) { in ppp_save_byte()
302 ppp->pkt = net_pkt_rx_alloc_with_buffer( in ppp_save_byte()
303 ppp->iface, in ppp_save_byte()
306 if (!ppp->pkt) { in ppp_save_byte()
308 return -ENOMEM; in ppp_save_byte()
311 net_pkt_cursor_init(ppp->pkt); in ppp_save_byte()
313 ppp->available = net_pkt_available_buffer(ppp->pkt); in ppp_save_byte()
316 /* Extra debugging can be enabled separately if really in ppp_save_byte()
326 if (ppp->available == 1) { in ppp_save_byte()
327 ret = net_pkt_alloc_buffer(ppp->pkt, in ppp_save_byte()
328 CONFIG_NET_BUF_DATA_SIZE + ppp->available, in ppp_save_byte()
335 ppp->available = net_pkt_available_buffer(ppp->pkt); in ppp_save_byte()
338 if (ppp->available) { in ppp_save_byte()
339 ret = net_pkt_write_u8(ppp->pkt, byte); in ppp_save_byte()
342 ppp, ppp->pkt, ret); in ppp_save_byte()
346 ppp->available--; in ppp_save_byte()
352 net_pkt_unref(ppp->pkt); in ppp_save_byte()
353 ppp->pkt = NULL; in ppp_save_byte()
354 return -ENOMEM; in ppp_save_byte()
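ppp_save_byte() lazily allocates an RX net_pkt on the first byte of a frame and appends bytes one at a time, growing the buffer as it fills. A compressed sketch of that flow, assuming Zephyr's net_pkt API; the 256-byte buffer size is an arbitrary assumption and buffer growth is omitted:

/* Minimal sketch: lazily allocate an RX packet and append one byte,
 * loosely following ppp_save_byte(). Buffer growth (net_pkt_alloc_buffer)
 * is left out for brevity.
 */
#include <errno.h>
#include <zephyr/net/net_pkt.h>

static int save_byte(struct net_if *iface, struct net_pkt **pkt, uint8_t byte)
{
	if (*pkt == NULL) {
		/* Allocate the packet and its initial buffer on the first byte. */
		*pkt = net_pkt_rx_alloc_with_buffer(iface, 256, AF_UNSPEC, 0,
						    K_NO_WAIT);
		if (*pkt == NULL) {
			return -ENOMEM;
		}

		net_pkt_cursor_init(*pkt);
	}

	/* Append the byte at the packet's write cursor. */
	return net_pkt_write_u8(*pkt, byte);
}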
380 if (ctx->state == new_state) { in ppp_change_state()
388 ctx, ppp_driver_state_str(ctx->state), ctx->state, in ppp_change_state()
391 ctx->state = new_state; in ppp_change_state()
399 uint8_t *buf = ppp->send_buf; in ppp_send_flush()
409 len--; in ppp_send_flush()
414 len--; in ppp_send_flush()
417 net_capture_data(&ppp_capture_ctx->cooked, in ppp_send_flush()
431 ret = uart_tx(ppp->dev, buf, off, timeout); in ppp_send_flush()
437 while (off--) { in ppp_send_flush()
438 uart_poll_out(ppp->dev, *buf++); in ppp_send_flush()
451 ppp->send_buf[off++] = data[i]; in ppp_send_bytes()
453 if (off >= sizeof(ppp->send_buf)) { in ppp_send_bytes()
472 if (ppp->client_index >= (sizeof(CLIENT) - 1)) { in ppp_handle_client()
473 ppp->client_index = 0; in ppp_handle_client()
476 if (byte != client[ppp->client_index]) { in ppp_handle_client()
477 ppp->client_index = 0; in ppp_handle_client()
478 if (byte != client[ppp->client_index]) { in ppp_handle_client()
483 ++ppp->client_index; in ppp_handle_client()
484 if (ppp->client_index >= (sizeof(CLIENT) - 1)) { in ppp_handle_client()
487 sizeof(CLIENTSERVER) - 1, 0); in ppp_handle_client()
489 ppp->client_index = 0; in ppp_handle_client()
497 int ret = -EAGAIN; in ppp_input_byte()
499 switch (ppp->state) { in ppp_input_byte()
519 return -EAGAIN; in ppp_input_byte()
543 ret = -EAGAIN; in ppp_input_byte()
559 ppp->next_escaped = true; in ppp_input_byte()
563 if (ppp->next_escaped) { in ppp_input_byte()
566 ppp->next_escaped = false; in ppp_input_byte()
574 ret = -EAGAIN; in ppp_input_byte()
580 LOG_ERR("[%p] Invalid state %d", ppp, ppp->state); in ppp_input_byte()
590 uint16_t crc; in ppp_check_fcs() local
592 buf = ppp->pkt->buffer; in ppp_check_fcs()
597 crc = crc16_ccitt(0xffff, buf->data, buf->len); in ppp_check_fcs()
599 buf = buf->frags; in ppp_check_fcs()
602 crc = crc16_ccitt(crc, buf->data, buf->len); in ppp_check_fcs()
603 buf = buf->frags; in ppp_check_fcs()
606 if (crc != 0xf0b8) { in ppp_check_fcs()
607 LOG_DBG("Invalid FCS (0x%x)", crc); in ppp_check_fcs()
609 ppp->stats.chkerr++; in ppp_check_fcs()
620 net_pkt_hexdump(ppp->pkt, "recv ppp"); in ppp_process_msg()
625 ppp->stats.drop++; in ppp_process_msg()
626 ppp->stats.pkts.rx++; in ppp_process_msg()
628 net_pkt_unref(ppp->pkt); in ppp_process_msg()
630 /* If PPP packet capturing is enabled, then send the in ppp_process_msg()
633 * invalid frames, the if-block would need to be moved before in ppp_process_msg()
645 copied = net_buf_linearize(ppp_capture_ctx->capture_buf, in ppp_process_msg()
646 sizeof(ppp_capture_ctx->capture_buf), in ppp_process_msg()
647 ppp->pkt->buffer, in ppp_process_msg()
649 net_pkt_get_len(ppp->pkt)); in ppp_process_msg()
651 net_capture_data(&ppp_capture_ctx->cooked, in ppp_process_msg()
652 ppp_capture_ctx->capture_buf, in ppp_process_msg()
659 * FCS fields (16-bit) as the PPP L2 layer does not need in ppp_process_msg()
662 uint16_t addr_and_ctrl = net_buf_pull_be16(ppp->pkt->buffer); in ppp_process_msg()
669 ppp->stats.drop++; in ppp_process_msg()
670 ppp->stats.pkts.rx++; in ppp_process_msg()
672 net_pkt_unref(ppp->pkt); in ppp_process_msg()
675 net_pkt_remove_tail(ppp->pkt, 2); in ppp_process_msg()
680 net_pkt_cursor_init(ppp->pkt); in ppp_process_msg()
681 net_pkt_set_overwrite(ppp->pkt, true); in ppp_process_msg()
683 if (net_recv_data(ppp->iface, ppp->pkt) < 0) { in ppp_process_msg()
684 net_pkt_unref(ppp->pkt); in ppp_process_msg()
689 ppp->pkt = NULL; in ppp_process_msg()
701 /* Extra debugging can be enabled separately if really in ppp_recv_cb()
709 if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) { in ppp_recv_cb()
719 *off = len - i - 1; in ppp_recv_cb()
735 memset(ppp->buf, 0, UART_BUF_LEN); in ppp_driver_feed_data()
745 memcpy(ppp->buf, data, data_to_copy); in ppp_driver_feed_data()
749 (void)ppp_recv_cb(ppp->buf, &recv_off); in ppp_driver_feed_data()
751 remaining = data_to_copy - recv_off; in ppp_driver_feed_data()
755 data_len -= remaining; in ppp_driver_feed_data()
764 uint16_t crc; in calc_fcs() local
767 buf = pkt->buffer; in calc_fcs()
775 crc = crc16_ccitt(0xffff, (const uint8_t *)&c, sizeof(c)); in calc_fcs()
778 crc = crc16_ccitt(crc, (const uint8_t *)&protocol, in calc_fcs()
783 crc = crc16_ccitt(crc, buf->data, buf->len); in calc_fcs()
784 buf = buf->frags; in calc_fcs()
787 crc ^= 0xffff; in calc_fcs()
788 *fcs = crc; in calc_fcs()
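calc_fcs() seeds the CRC with 0xffff, runs it over the address/control bytes, the 16-bit protocol field and the payload fragments, then complements the result; the FCS is appended to the frame least-significant byte first. The equivalent for a contiguous frame body, as a sketch:

/* Minimal sketch: FCS-16 generation over a contiguous frame body
 * (address/control + protocol + payload, before escaping).
 */
#include <stdint.h>
#include <stddef.h>
#include <zephyr/sys/crc.h>

static uint16_t fcs16_calc(const uint8_t *body, size_t len)
{
	uint16_t crc = crc16_ccitt(0xffff, body, len);

	/* RFC 1662: transmit the one's complement, LSB first. */
	return crc ^ 0xffff;
}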
806 struct ppp_driver_context *ppp = dev->data; in ppp_send()
807 struct net_buf *buf = pkt->buffer; in ppp_send()
823 return -ENODATA; in ppp_send()
835 return -EPROTONOSUPPORT; in ppp_send()
840 return -ENOMEM; in ppp_send()
870 for (i = 0; i < buf->len; i++) { in ppp_send()
872 escaped = htons(ppp_escape_byte(buf->data[i], &offset)); in ppp_send()
879 buf = buf->frags; in ppp_send()
907 len = ring_buf_get_claim(&ppp->rx_ringbuf, &data, in ppp_consume_ringbuf()
910 LOG_DBG("Ringbuf %p is empty!", &ppp->rx_ringbuf); in ppp_consume_ringbuf()
916 LOG_HEXDUMP_DBG(data, len, ppp->dev->name); in ppp_consume_ringbuf()
924 if (ppp->pkt && net_pkt_get_len(ppp->pkt) > 3) { in ppp_consume_ringbuf()
928 } while (--tmp); in ppp_consume_ringbuf()
930 ret = ring_buf_get_finish(&ppp->rx_ringbuf, len); in ppp_consume_ringbuf()
935 return -EAGAIN; in ppp_consume_ringbuf()
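ppp_consume_ringbuf() borrows a contiguous region from the ring buffer with ring_buf_get_claim(), parses it in place, and then returns it with ring_buf_get_finish(). The same pattern in isolation, as a sketch (the 64-byte chunk size is an arbitrary assumption):

/* Minimal sketch of the claim/parse/finish pattern used above. */
#include <zephyr/sys/ring_buffer.h>

static int consume(struct ring_buf *rb)
{
	uint8_t *data;
	uint32_t len;

	/* Borrow a contiguous chunk directly from the ring buffer storage. */
	len = ring_buf_get_claim(rb, &data, 64);
	if (len == 0) {
		return 0;	/* nothing buffered */
	}

	for (uint32_t i = 0; i < len; i++) {
		/* parse data[i] here */
	}

	/* Release exactly the number of bytes that were consumed. */
	return ring_buf_get_finish(rb, len);
}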
942 int ret = -EAGAIN; in ppp_isr_cb_work()
944 while (ret == -EAGAIN) { in ppp_isr_cb_work()
952 struct ppp_driver_context *ppp = dev->data; in ppp_driver_init()
957 ring_buf_init(&ppp->rx_ringbuf, sizeof(ppp->rx_buf), ppp->rx_buf); in ppp_driver_init()
958 k_work_init(&ppp->cb_work, ppp_isr_cb_work); in ppp_driver_init()
960 k_work_queue_start(&ppp->cb_workq, ppp_workq, in ppp_driver_init()
963 k_thread_name_set(&ppp->cb_workq.thread, "ppp_workq"); in ppp_driver_init()
965 k_work_init_delayable(&ppp->uart_recovery_work, uart_recovery); in ppp_driver_init()
968 ppp->pkt = NULL; in ppp_driver_init()
971 ppp->client_index = 0; in ppp_driver_init()
979 ppp->ll_addr.addr = ppp->mac_addr; in ppp_get_mac()
980 ppp->ll_addr.len = sizeof(ppp->mac_addr); in ppp_get_mac()
982 return &ppp->ll_addr; in ppp_get_mac()
987 struct ppp_driver_context *ppp = net_if_get_device(iface)->data; in ppp_iface_init()
994 if (ppp->init_done) { in ppp_iface_init()
998 ppp->init_done = true; in ppp_iface_init()
999 ppp->iface = iface; in ppp_iface_init()
1007 if (net_bytes_from_str(ppp->mac_addr, sizeof(ppp->mac_addr), in ppp_iface_init()
1013 /* 00-00-5E-00-53-xx Documentation RFC 7042 */ in ppp_iface_init()
1014 ppp->mac_addr[0] = 0x00; in ppp_iface_init()
1015 ppp->mac_addr[1] = 0x00; in ppp_iface_init()
1016 ppp->mac_addr[2] = 0x5E; in ppp_iface_init()
1017 ppp->mac_addr[3] = 0x00; in ppp_iface_init()
1018 ppp->mac_addr[4] = 0x53; in ppp_iface_init()
1019 ppp->mac_addr[5] = sys_rand8_get(); in ppp_iface_init()
1022 net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len, in ppp_iface_init()
1031 ret = net_capture_cooked_setup(&ppp_capture_ctx->cooked, in ppp_iface_init()
1033 sizeof(ppp->mac_addr), in ppp_iface_init()
1034 ppp->mac_addr); in ppp_iface_init()
1043 memset(ppp->buf, 0, sizeof(ppp->buf)); in ppp_iface_init()
1057 struct ppp_driver_context *context = dev->data; in ppp_get_stats()
1059 return &context->stats; in ppp_get_stats()
1080 rx = uart_fifo_read(uart, context->buf, sizeof(context->buf)); in ppp_uart_isr()
1085 ret = ring_buf_put(&context->rx_ringbuf, context->buf, rx); in ppp_uart_isr()
1093 k_work_submit_to_queue(&context->cb_workq, &context->cb_work); in ppp_uart_isr()
1100 struct ppp_driver_context *context = dev->data; in ppp_start()
1104 if (atomic_cas(&context->modem_init_done, false, true)) { in ppp_start()
1105 context->dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_ppp_uart)); in ppp_start()
1107 LOG_DBG("Initializing PPP to use %s", context->dev->name); in ppp_start()
1109 if (!device_is_ready(context->dev)) { in ppp_start()
1110 LOG_ERR("Device %s is not ready", context->dev->name); in ppp_start()
1111 return -ENODEV; in ppp_start()
1117 uart_irq_rx_disable(context->dev); in ppp_start()
1118 uart_irq_tx_disable(context->dev); in ppp_start()
1119 ppp_uart_flush(context->dev); in ppp_start()
1120 uart_irq_callback_user_data_set(context->dev, ppp_uart_isr, in ppp_start()
1122 uart_irq_rx_enable(context->dev); in ppp_start()
1127 net_if_carrier_on(context->iface); in ppp_start()
1133 struct ppp_driver_context *context = dev->data; in ppp_stop()
1135 net_if_carrier_off(context->iface); in ppp_stop()
1137 uart_rx_disable(context->dev); in ppp_stop()
1139 context->modem_init_done = false; in ppp_stop()