Lines Matching +full:uart +full:-dev

All matches are from the Zephyr UART-over-NUS serial driver (struct uart_bt_data and the uart_bt_* functions), grouped below by enclosing definition; the number in front of each hit is its line in the source file.

At file scope:
   4:   * SPDX-License-Identifier: Apache-2.0
   8:  #include <zephyr/drivers/uart.h>

In struct uart_bt_data (member declarations):
  37:                  const struct device *dev;
  41:          } uart;
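
Taken together with the dev_data->bt.* and dev_data->uart.* accesses in the matches below, the two member hits above suggest a per-instance layout along the following lines. This is a sketch: only the fields that actually appear in matched lines are attested; the grouping and every other name are assumptions.

    struct uart_bt_data {
        struct {
            struct bt_nus_inst *inst;          /* lines 144, 334 */
            struct bt_nus_cb cb;               /* line 334 */
            atomic_t enabled;                  /* lines 51, 168, 196, 205 */
        } bt;
        struct {
            struct {
                uart_irq_callback_user_data_t cb;
                void *cb_data;
                const struct device *dev;      /* line 37 */
            } callback;
            struct ring_buf *rx_ringbuf;       /* lines 69, 179, 247 */
            struct ring_buf *tx_ringbuf;       /* lines 55, 142, 153 */
            struct k_work cb_work;             /* line 332 */
            struct k_work_delayable tx_work;   /* line 331 */
            bool rx_irq_ena;                   /* lines 258, 267 */
            bool tx_irq_ena;                   /* lines 229, 240 */
        } uart;                                /* line 41 */
    };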

In bt_notif_enabled():
  48:          const struct device *dev = (const struct device *)ctx;
  49:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
  51:          (void)atomic_set(&dev_data->bt.enabled, enabled ? 1 : 0);
  53:          LOG_DBG("%s() - %s", __func__, enabled ? "enabled" : "disabled");
  55:          if (!ring_buf_is_empty(dev_data->uart.tx_ringbuf)) {
  56:                  k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);

In bt_received():
  67:          const struct device *dev = (const struct device *)ctx;
  68:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
  69:          struct ring_buf *ringbuf = dev_data->uart.rx_ringbuf;
  72:          LOG_DBG("%s() - len: %d, rx_ringbuf space %d", __func__, len, ring_buf_space_get(ringbuf));
  80:          k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
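
The lines elided between 72 and 80 presumably copy the received payload into the RX ring buffer before the callback work is submitted. A minimal sketch of that step, assuming the conventional ring_buf_put() pattern (put_len is a hypothetical name):

    uint32_t put_len = ring_buf_put(ringbuf, (const uint8_t *)data, len);

    if (put_len < len) {
        /* ring_buf_put() stores as much as fits and returns that count. */
        LOG_ERR("RX ring buffer full: received %u, queued %u", len, put_len);
    }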

In get_max_chunk_size():
 112:          return (min_att_mtu - 1 - 2);
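
The "- 1 - 2" is the fixed GATT notification overhead: 1 byte of ATT opcode plus 2 bytes of attribute handle, so the usable payload per notification is ATT_MTU - 3. With the default ATT MTU of 23 bytes that gives the familiar 20-byte chunks; min_att_mtu is presumably the smallest ATT MTU across live connections (its computation is not among the matches):

    /* payload per notification = ATT_MTU - opcode (1) - attribute handle (2) */
    uint16_t chunk = min_att_mtu - 1 - 2;   /* e.g. 23 - 3 = 20 for the default MTU */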

In cb_work_handler():
 117:          struct uart_bt_data *dev_data = CONTAINER_OF(work, struct uart_bt_data, uart.cb_work);
 119:          if (dev_data->uart.callback.cb) {
 120:                  dev_data->uart.callback.cb(
 121:                          dev_data->uart.callback.dev,
 122:                          dev_data->uart.callback.cb_data);
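
In other words, the "UART interrupt" is emulated: cb_work_handler() runs on the shared nus_work_queue in thread context and simply invokes the callback registered through uart_bt_irq_callback_set() (lines 288-289), which is consistent with the driver guarding the real-ISR case separately via k_is_in_isr() at line 196.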

In tx_work_handler():
 129:          struct uart_bt_data *dev_data = CONTAINER_OF(dwork, struct uart_bt_data, uart.tx_work);
 142:          len = ring_buf_get_claim(dev_data->uart.tx_ringbuf, &data, chunk_size);
 144:          err = bt_nus_inst_send(NULL, dev_data->bt.inst, data, len);
 150:          ring_buf_get_finish(dev_data->uart.tx_ringbuf, len);
 153:          if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) {
 154:                  k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);
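
Lines 142-150 outline Zephyr's zero-copy ring-buffer TX pattern: claim a contiguous region, hand it straight to the transport, then release the claim. A sketch of the presumed loop around the three matched calls (the loop shape and error handling are assumptions):

    uint8_t *data;
    uint32_t len;
    int err = 0;

    do {
        /* Claim up to one notification's worth of contiguous bytes. */
        len = ring_buf_get_claim(dev_data->uart.tx_ringbuf, &data, chunk_size);
        if (len > 0) {
            err = bt_nus_inst_send(NULL, dev_data->bt.inst, data, len);
        }
        /* Release exactly what was claimed, even if the send failed. */
        ring_buf_get_finish(dev_data->uart.tx_ringbuf, len);
    } while (len > 0 && err == 0);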

In uart_bt_fifo_fill():
 158:  static int uart_bt_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len)
 160:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 163:          wrote = ring_buf_put(dev_data->uart.tx_ringbuf, tx_data, len);
 165:                  LOG_WRN("Ring buffer full, drop %zd bytes", len - wrote);
 168:          if (atomic_get(&dev_data->bt.enabled)) {
 169:                  k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT);

In uart_bt_fifo_read():
 175:  static int uart_bt_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
 177:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 179:          return ring_buf_get(dev_data->uart.rx_ringbuf, rx_data, size);

In uart_bt_poll_in():
 182:  static int uart_bt_poll_in(const struct device *dev, unsigned char *c)
 184:          int err = uart_bt_fifo_read(dev, c, 1);
 186:          return err == 1 ? 0 : -1;
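
Returning 0 only when exactly one byte was read matches the generic uart_poll_in() contract: 0 when a character arrived, -1 when no input is available.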

In uart_bt_poll_out():
 189:  static void uart_bt_poll_out(const struct device *dev, unsigned char c)
 191:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 192:          struct ring_buf *ringbuf = dev_data->uart.tx_ringbuf;
 194:          /** Right now we're discarding data if ring-buf is full. */
 196:          if (k_is_in_isr() || !atomic_get(&dev_data->bt.enabled)) {
 205:          if (atomic_get(&dev_data->bt.enabled)) {
 208:                   * called inside a for-loop).
 210:                  k_work_schedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_MSEC(1));
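
The K_MSEC(1) delay at line 210, together with the "called inside a for-loop" comment fragment at line 208, suggests deliberate batching: poll-mode output arrives one character per call, so deferring the TX work briefly lets a burst of characters coalesce into a single notification instead of one notification per byte. A hypothetical caller illustrating the effect (uart_dev, buf and len are assumed names):

    for (size_t i = 0; i < len; i++) {
        uart_poll_out(uart_dev, buf[i]);   /* each call only queues one byte */
    }
    /* ~1 ms later, tx_work_handler() drains the ring buffer in MTU-sized chunks. */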

In uart_bt_irq_tx_ready():
 214:  static int uart_bt_irq_tx_ready(const struct device *dev)
 216:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 218:          if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) {

In uart_bt_irq_tx_enable():
 225:  static void uart_bt_irq_tx_enable(const struct device *dev)
 227:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 229:          dev_data->uart.tx_irq_ena = true;
 231:          if (uart_bt_irq_tx_ready(dev)) {
 232:                  k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);

In uart_bt_irq_tx_disable():
 236:  static void uart_bt_irq_tx_disable(const struct device *dev)
 238:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 240:          dev_data->uart.tx_irq_ena = false;

In uart_bt_irq_rx_ready():
 243:  static int uart_bt_irq_rx_ready(const struct device *dev)
 245:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 247:          if (!ring_buf_is_empty(dev_data->uart.rx_ringbuf) && dev_data->uart.rx_irq_ena) {

In uart_bt_irq_rx_enable():
 254:  static void uart_bt_irq_rx_enable(const struct device *dev)
 256:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 258:          dev_data->uart.rx_irq_ena = true;
 260:          k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work);

In uart_bt_irq_rx_disable():
 263:  static void uart_bt_irq_rx_disable(const struct device *dev)
 265:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 267:          dev_data->uart.rx_irq_ena = false;

In uart_bt_irq_is_pending():
 270:  static int uart_bt_irq_is_pending(const struct device *dev)
 272:          return uart_bt_irq_rx_ready(dev);

In uart_bt_irq_update():
 275:  static int uart_bt_irq_update(const struct device *dev)
 277:          ARG_UNUSED(dev);

In uart_bt_irq_callback_set():
 282:  static void uart_bt_irq_callback_set(const struct device *dev,
 286:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 288:          dev_data->uart.callback.cb = cb;
 289:          dev_data->uart.callback.cb_data = cb_data;

At file scope:
 292:  static DEVICE_API(uart, uart_bt_driver_api) = {
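
Only the opening brace of the api table is matched. Given the uart_bt_* functions above, its contents are plausibly as follows; this is a sketch that assumes the standard uart_driver_api member names, and its ordering and completeness are guesses:

    static DEVICE_API(uart, uart_bt_driver_api) = {
        .poll_in = uart_bt_poll_in,
        .poll_out = uart_bt_poll_out,
        .fifo_fill = uart_bt_fifo_fill,
        .fifo_read = uart_bt_fifo_read,
        .irq_tx_enable = uart_bt_irq_tx_enable,
        .irq_tx_disable = uart_bt_irq_tx_disable,
        .irq_tx_ready = uart_bt_irq_tx_ready,
        .irq_rx_enable = uart_bt_irq_rx_enable,
        .irq_rx_disable = uart_bt_irq_rx_disable,
        .irq_rx_ready = uart_bt_irq_rx_ready,
        .irq_is_pending = uart_bt_irq_is_pending,
        .irq_update = uart_bt_irq_update,
        .irq_callback_set = uart_bt_irq_callback_set,
    };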

Shared work queue:
 318:  /** The work-queue is shared across all instances, hence we initialize it separately. */

In uart_bt_init():
 321:  static int uart_bt_init(const struct device *dev)
 324:          struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data;
 329:          dev_data->uart.callback.dev = dev;
 331:          k_work_init_delayable(&dev_data->uart.tx_work, tx_work_handler);
 332:          k_work_init(&dev_data->uart.cb_work, cb_work_handler);
 334:          err = bt_nus_inst_cb_register(dev_data->bt.inst, &dev_data->bt.cb, (void *)dev);
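
The registration at line 334 passes the device pointer as the callback context, which bt_notif_enabled() and bt_received() cast back at lines 48 and 67. Assuming the field names of Zephyr's struct bt_nus_cb from <zephyr/bluetooth/services/nus.h>, the per-instance callbacks in bt.cb are plausibly wired like this (sketch; the initializer is not among the matches):

    #include <zephyr/bluetooth/services/nus.h>

    static struct bt_nus_cb nus_cb = {
        .notif_enabled = bt_notif_enabled,   /* peer (un)subscribed to TX notifications */
        .received      = bt_received,        /* peer wrote to the RX characteristic */
    };

    /* ...then, per instance, with the device as context: */
    err = bt_nus_inst_cb_register(dev_data->bt.inst, &nus_cb, (void *)dev);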

In the instance-definition macro:
 361:          .uart = { \
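
From the application side the instance behaves like any interrupt-driven Zephyr UART. A minimal echo sketch using only the generic <zephyr/drivers/uart.h> API, assuming a devicetree node labeled nus_uart bound to this driver (the label is hypothetical):

    #include <zephyr/device.h>
    #include <zephyr/drivers/uart.h>

    static const struct device *const uart_dev = DEVICE_DT_GET(DT_NODELABEL(nus_uart));

    static void uart_cb(const struct device *dev, void *user_data)
    {
        uint8_t buf[32];

        /* With this driver the callback runs on its work queue, not a hardware ISR. */
        while (uart_irq_update(dev) && uart_irq_is_pending(dev)) {
            if (uart_irq_rx_ready(dev)) {
                int n = uart_fifo_read(dev, buf, sizeof(buf));

                if (n > 0) {
                    /* Echo the bytes back out as BLE notifications. */
                    uart_fifo_fill(dev, buf, n);
                }
            }
        }
    }

    int main(void)
    {
        uart_irq_callback_set(uart_dev, uart_cb);
        uart_irq_rx_enable(uart_dev);
        return 0;
    }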