1 /*
2 * Copyright (c) 2022 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT zephyr_cdc_acm_uart
8
9 #include <zephyr/init.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/drivers/uart.h>
12 #include <zephyr/sys/ring_buffer.h>
13 #include <zephyr/sys/byteorder.h>
14
15 #include <zephyr/usb/usbd.h>
16 #include <zephyr/usb/usb_ch9.h>
17 #include <zephyr/usb/class/usb_cdc.h>
18
19 #include <zephyr/drivers/usb/udc.h>
20
21 #include <zephyr/logging/log.h>
22 LOG_MODULE_REGISTER(usbd_cdc_acm, CONFIG_USBD_CDC_ACM_LOG_LEVEL);
23
/* Buffer pool for UDC transfer requests, two buffers per instance */
NET_BUF_POOL_FIXED_DEFINE(cdc_acm_ep_pool,
			  DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
			  512, sizeof(struct udc_buf_info), NULL);

/* Default line coding: 115200 baud, 1 stop bit, no parity, 8 data bits */
#define CDC_ACM_DEFAULT_LINECODING {sys_cpu_to_le32(115200), 0, 0, 8}
#define CDC_ACM_DEFAULT_BULK_EP_MPS 0
#define CDC_ACM_DEFAULT_INT_EP_MPS 16
#define CDC_ACM_DEFAULT_INT_INTERVAL 0x0A

/* Bit positions in the cdc_acm_uart_data state bitmap */
#define CDC_ACM_CLASS_ENABLED 0
#define CDC_ACM_CLASS_SUSPENDED 1
#define CDC_ACM_IRQ_RX_ENABLED 2
#define CDC_ACM_IRQ_TX_ENABLED 3
#define CDC_ACM_RX_FIFO_BUSY 4
#define CDC_ACM_LOCK 5

/* Work queue shared by all CDC ACM instances */
static struct k_work_q cdc_acm_work_q;
static K_KERNEL_STACK_DEFINE(cdc_acm_stack,
			     CONFIG_USBD_CDC_ACM_STACK_SIZE);

/* Per-direction FIFO bookkeeping for the UART IRQ API emulation */
struct cdc_acm_uart_fifo {
	/* Backing ring buffer */
	struct ring_buf *rb;
	/* Emulated interrupt of this FIFO is pending */
	bool irq;
	/* FIFO content was changed by the last IRQ callback run */
	bool altered;
};
49
struct cdc_acm_uart_data {
	/* Pointer to the associated USBD class node */
	struct usbd_class_node *c_nd;
	/* Line Coding Structure */
	struct cdc_acm_line_coding line_coding;
	/* SetControlLineState bitmap */
	uint16_t line_state;
	/* Serial state bitmap */
	uint16_t serial_state;
	/* UART actual configuration */
	struct uart_config uart_cfg;
	/* UART actual RTS state */
	bool line_state_rts;
	/* UART actual DTR state */
	bool line_state_dtr;
	/* UART API IRQ callback */
	uart_irq_callback_user_data_t cb;
	/* UART API user callback data */
	void *cb_data;
	/* UART API IRQ callback work */
	struct k_work irq_cb_work;
	/* RX FIFO exposed through the UART API */
	struct cdc_acm_uart_fifo rx_fifo;
	/* TX FIFO exposed through the UART API */
	struct cdc_acm_uart_fifo tx_fifo;
	/* USBD CDC ACM TX fifo work */
	struct k_work tx_fifo_work;
	/* USBD CDC ACM RX fifo work */
	struct k_work rx_fifo_work;
	/* Driver state bits, see the CDC_ACM_* defines above */
	atomic_t state;
	/* Signals completion of an interrupt IN (notification) transfer */
	struct k_sem notif_sem;
};
80
/*
 * Descriptor layout of one CDC ACM function: an IAD grouping the
 * communication interface (if0, with functional descriptors and one
 * interrupt IN endpoint) and the data interface (if1, with bulk
 * IN/OUT endpoints), terminated by a nil descriptor header.
 */
struct usbd_cdc_acm_desc {
	struct usb_association_descriptor iad_cdc;
	struct usb_if_descriptor if0;
	struct cdc_header_descriptor if0_header;
	struct cdc_cm_descriptor if0_cm;
	struct cdc_acm_descriptor if0_acm;
	struct cdc_union_descriptor if0_union;
	struct usb_ep_descriptor if0_int_ep;

	struct usb_if_descriptor if1;
	struct usb_ep_descriptor if1_in_ep;
	struct usb_ep_descriptor if1_out_ep;

	struct usb_desc_header nil_desc;
} __packed;

static void cdc_acm_irq_rx_enable(const struct device *dev);
98
cdc_acm_buf_alloc(const uint8_t ep)99 struct net_buf *cdc_acm_buf_alloc(const uint8_t ep)
100 {
101 struct net_buf *buf = NULL;
102 struct udc_buf_info *bi;
103
104 buf = net_buf_alloc(&cdc_acm_ep_pool, K_NO_WAIT);
105 if (!buf) {
106 return NULL;
107 }
108
109 bi = udc_get_buf_info(buf);
110 memset(bi, 0, sizeof(struct udc_buf_info));
111 bi->ep = ep;
112
113 return buf;
114 }
115
/* Submit work to the dedicated CDC ACM work queue */
static ALWAYS_INLINE int cdc_acm_work_submit(struct k_work *work)
{
	return k_work_submit_to_queue(&cdc_acm_work_q, work);
}
120
check_wq_ctx(const struct device * dev)121 static ALWAYS_INLINE bool check_wq_ctx(const struct device *dev)
122 {
123 return k_current_get() == k_work_queue_thread_get(&cdc_acm_work_q);
124 }
125
cdc_acm_get_int_in(struct usbd_class_node * const c_nd)126 static uint8_t cdc_acm_get_int_in(struct usbd_class_node *const c_nd)
127 {
128 struct usbd_cdc_acm_desc *desc = c_nd->data->desc;
129
130 return desc->if0_int_ep.bEndpointAddress;
131 }
132
cdc_acm_get_bulk_in(struct usbd_class_node * const c_nd)133 static uint8_t cdc_acm_get_bulk_in(struct usbd_class_node *const c_nd)
134 {
135 struct usbd_cdc_acm_desc *desc = c_nd->data->desc;
136
137 return desc->if1_in_ep.bEndpointAddress;
138 }
139
cdc_acm_get_bulk_out(struct usbd_class_node * const c_nd)140 static uint8_t cdc_acm_get_bulk_out(struct usbd_class_node *const c_nd)
141 {
142 struct usbd_cdc_acm_desc *desc = c_nd->data->desc;
143
144 return desc->if1_out_ep.bEndpointAddress;
145 }
146
cdc_acm_get_bulk_mps(struct usbd_class_node * const c_nd)147 static size_t cdc_acm_get_bulk_mps(struct usbd_class_node *const c_nd)
148 {
149 struct usbd_cdc_acm_desc *desc = c_nd->data->desc;
150
151 return desc->if1_out_ep.wMaxPacketSize;
152 }
153
/*
 * Endpoint transfer completion handler invoked by the USB device
 * stack for bulk OUT (RX), bulk IN (TX) and interrupt IN
 * (notification) transfers. Hands follow-up processing off to the
 * class work queue and always releases the buffer before returning.
 */
static int usbd_cdc_acm_request(struct usbd_class_node *const c_nd,
				struct net_buf *buf, int err)
{
	struct usbd_contex *uds_ctx = c_nd->data->uds_ctx;
	const struct device *dev = c_nd->data->priv;
	struct cdc_acm_uart_data *data = dev->data;
	struct udc_buf_info *bi;

	bi = udc_get_buf_info(buf);
	if (err) {
		if (err == -ECONNABORTED) {
			LOG_WRN("request ep 0x%02x, len %u cancelled",
				bi->ep, buf->len);
		} else {
			LOG_ERR("request ep 0x%02x, len %u failed",
				bi->ep, buf->len);
		}

		/* A failed OUT transfer no longer occupies the RX path */
		if (bi->ep == cdc_acm_get_bulk_out(c_nd)) {
			atomic_clear_bit(&data->state, CDC_ACM_RX_FIFO_BUSY);
		}

		goto ep_request_error;
	}

	if (bi->ep == cdc_acm_get_bulk_out(c_nd)) {
		/* RX transfer completion */
		size_t done;

		LOG_HEXDUMP_INF(buf->data, buf->len, "");
		/* Push the received data into the RX FIFO and, when an
		 * IRQ callback is registered, let it run to consume it.
		 */
		done = ring_buf_put(data->rx_fifo.rb, buf->data, buf->len);
		if (done && data->cb) {
			cdc_acm_work_submit(&data->irq_cb_work);
		}

		/* Allow the RX worker to queue the next OUT transfer */
		atomic_clear_bit(&data->state, CDC_ACM_RX_FIFO_BUSY);
		cdc_acm_work_submit(&data->rx_fifo_work);
	}

	if (bi->ep == cdc_acm_get_bulk_in(c_nd)) {
		/* TX transfer completion */
		if (data->cb) {
			cdc_acm_work_submit(&data->irq_cb_work);
		}
	}

	if (bi->ep == cdc_acm_get_int_in(c_nd)) {
		/* Notification sent, unblock cdc_acm_send_notification() */
		k_sem_give(&data->notif_sem);
	}

ep_request_error:
	return usbd_ep_buf_free(uds_ctx, buf);
}
207
/* Alternate setting update handler; CDC ACM has none, log only */
static void usbd_cdc_acm_update(struct usbd_class_node *const c_nd,
				uint8_t iface, uint8_t alternate)
{
	LOG_DBG("New configuration, interface %u alternate %u",
		iface, alternate);
}
214
/*
 * Class enable handler: mark the instance active and restart paths
 * the application enabled while the device was not yet configured.
 */
static void usbd_cdc_acm_enable(struct usbd_class_node *const c_nd)
{
	const struct device *dev = c_nd->data->priv;
	struct cdc_acm_uart_data *data = dev->data;

	atomic_set_bit(&data->state, CDC_ACM_CLASS_ENABLED);
	LOG_INF("Configuration enabled");

	/* Re-run the enable path so pending RX data and transfers resume */
	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED)) {
		cdc_acm_irq_rx_enable(dev);
	}

	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED)) {
		/* TODO */
	}
}
231
/* Class disable handler: clear the enabled and suspended state bits */
static void usbd_cdc_acm_disable(struct usbd_class_node *const c_nd)
{
	const struct device *dev = c_nd->data->priv;
	struct cdc_acm_uart_data *data = dev->data;

	atomic_clear_bit(&data->state, CDC_ACM_CLASS_ENABLED);
	atomic_clear_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
	LOG_INF("Configuration disabled");
}
241
/* Mark the instance suspended; TX and notification paths check this bit */
static void usbd_cdc_acm_suspended(struct usbd_class_node *const c_nd)
{
	const struct device *dev = c_nd->data->priv;
	struct cdc_acm_uart_data *data = dev->data;

	/* FIXME: filter stray suspended events earlier */
	atomic_set_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
}
250
/* Clear the suspended state bit on bus resume */
static void usbd_cdc_acm_resumed(struct usbd_class_node *const c_nd)
{
	const struct device *dev = c_nd->data->priv;
	struct cdc_acm_uart_data *data = dev->data;

	atomic_clear_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
}
258
cdc_acm_update_uart_cfg(struct cdc_acm_uart_data * const data)259 static void cdc_acm_update_uart_cfg(struct cdc_acm_uart_data *const data)
260 {
261 struct uart_config *const cfg = &data->uart_cfg;
262
263 cfg->baudrate = sys_le32_to_cpu(data->line_coding.dwDTERate);
264
265 switch (data->line_coding.bCharFormat) {
266 case USB_CDC_LINE_CODING_STOP_BITS_1:
267 cfg->stop_bits = UART_CFG_STOP_BITS_1;
268 break;
269 case USB_CDC_LINE_CODING_STOP_BITS_1_5:
270 cfg->stop_bits = UART_CFG_STOP_BITS_1_5;
271 break;
272 case USB_CDC_LINE_CODING_STOP_BITS_2:
273 default:
274 cfg->stop_bits = UART_CFG_STOP_BITS_2;
275 break;
276 };
277
278 switch (data->line_coding.bParityType) {
279 case USB_CDC_LINE_CODING_PARITY_NO:
280 default:
281 cfg->parity = UART_CFG_PARITY_NONE;
282 break;
283 case USB_CDC_LINE_CODING_PARITY_ODD:
284 cfg->parity = UART_CFG_PARITY_ODD;
285 break;
286 case USB_CDC_LINE_CODING_PARITY_EVEN:
287 cfg->parity = UART_CFG_PARITY_EVEN;
288 break;
289 case USB_CDC_LINE_CODING_PARITY_MARK:
290 cfg->parity = UART_CFG_PARITY_MARK;
291 break;
292 case USB_CDC_LINE_CODING_PARITY_SPACE:
293 cfg->parity = UART_CFG_PARITY_SPACE;
294 break;
295 };
296
297 switch (data->line_coding.bDataBits) {
298 case USB_CDC_LINE_CODING_DATA_BITS_5:
299 cfg->data_bits = UART_CFG_DATA_BITS_5;
300 break;
301 case USB_CDC_LINE_CODING_DATA_BITS_6:
302 cfg->data_bits = UART_CFG_DATA_BITS_6;
303 break;
304 case USB_CDC_LINE_CODING_DATA_BITS_7:
305 cfg->data_bits = UART_CFG_DATA_BITS_7;
306 break;
307 case USB_CDC_LINE_CODING_DATA_BITS_8:
308 default:
309 cfg->data_bits = UART_CFG_DATA_BITS_8;
310 break;
311 };
312
313 cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE;
314 }
315
cdc_acm_update_linestate(struct cdc_acm_uart_data * const data)316 static void cdc_acm_update_linestate(struct cdc_acm_uart_data *const data)
317 {
318 if (data->line_state & SET_CONTROL_LINE_STATE_RTS) {
319 data->line_state_rts = true;
320 } else {
321 data->line_state_rts = false;
322 }
323
324 if (data->line_state & SET_CONTROL_LINE_STATE_DTR) {
325 data->line_state_dtr = true;
326 } else {
327 data->line_state_dtr = false;
328 }
329 }
330
usbd_cdc_acm_cth(struct usbd_class_node * const c_nd,const struct usb_setup_packet * const setup,struct net_buf * const buf)331 static int usbd_cdc_acm_cth(struct usbd_class_node *const c_nd,
332 const struct usb_setup_packet *const setup,
333 struct net_buf *const buf)
334 {
335 const struct device *dev = c_nd->data->priv;
336 struct cdc_acm_uart_data *data = dev->data;
337 size_t min_len;
338
339 if (setup->bRequest == GET_LINE_CODING) {
340 if (buf == NULL) {
341 errno = -ENOMEM;
342 return 0;
343 }
344
345 min_len = MIN(sizeof(data->line_coding), setup->wLength);
346 net_buf_add_mem(buf, &data->line_coding, min_len);
347
348 return 0;
349 }
350
351 LOG_DBG("bmRequestType 0x%02x bRequest 0x%02x unsupported",
352 setup->bmRequestType, setup->bRequest);
353 errno = -ENOTSUP;
354
355 return 0;
356 }
357
usbd_cdc_acm_ctd(struct usbd_class_node * const c_nd,const struct usb_setup_packet * const setup,const struct net_buf * const buf)358 static int usbd_cdc_acm_ctd(struct usbd_class_node *const c_nd,
359 const struct usb_setup_packet *const setup,
360 const struct net_buf *const buf)
361 {
362 const struct device *dev = c_nd->data->priv;
363 struct cdc_acm_uart_data *data = dev->data;
364 size_t len;
365
366 switch (setup->bRequest) {
367 case SET_LINE_CODING:
368 len = sizeof(data->line_coding);
369 if (setup->wLength != len) {
370 errno = -ENOTSUP;
371 return 0;
372 }
373
374 memcpy(&data->line_coding, buf->data, len);
375 cdc_acm_update_uart_cfg(data);
376 return 0;
377
378 case SET_CONTROL_LINE_STATE:
379 data->line_state = setup->wValue;
380 cdc_acm_update_linestate(data);
381 return 0;
382
383 default:
384 break;
385 }
386
387 LOG_DBG("bmRequestType 0x%02x bRequest 0x%02x unsupported",
388 setup->bmRequestType, setup->bRequest);
389 errno = -ENOTSUP;
390
391 return 0;
392 }
393
usbd_cdc_acm_init(struct usbd_class_node * const c_nd)394 static int usbd_cdc_acm_init(struct usbd_class_node *const c_nd)
395 {
396 struct usbd_cdc_acm_desc *desc = c_nd->data->desc;
397
398 desc->iad_cdc.bFirstInterface = desc->if0.bInterfaceNumber;
399 desc->if0_union.bControlInterface = desc->if0.bInterfaceNumber;
400 desc->if0_union.bSubordinateInterface0 = desc->if1.bInterfaceNumber;
401
402 return 0;
403 }
404
cdc_acm_send_notification(const struct device * dev,const uint16_t serial_state)405 static int cdc_acm_send_notification(const struct device *dev,
406 const uint16_t serial_state)
407 {
408 struct cdc_acm_notification notification = {
409 .bmRequestType = 0xA1,
410 .bNotificationType = USB_CDC_SERIAL_STATE,
411 .wValue = 0,
412 .wIndex = 0,
413 .wLength = sys_cpu_to_le16(sizeof(uint16_t)),
414 .data = sys_cpu_to_le16(serial_state),
415 };
416 struct cdc_acm_uart_data *data = dev->data;
417 struct usbd_class_node *c_nd = data->c_nd;
418 struct net_buf *buf;
419 uint8_t ep;
420 int ret;
421
422 if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED)) {
423 LOG_INF("USB configuration is not enabled");
424 return -EACCES;
425 }
426
427 if (atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
428 LOG_INF("USB support is suspended (FIXME)");
429 return -EACCES;
430 }
431
432 ep = cdc_acm_get_int_in(c_nd);
433 buf = usbd_ep_buf_alloc(c_nd, ep, sizeof(struct cdc_acm_notification));
434 if (buf == NULL) {
435 return -ENOMEM;
436 }
437
438 net_buf_add_mem(buf, ¬ification, sizeof(struct cdc_acm_notification));
439 ret = usbd_ep_enqueue(c_nd, buf);
440 /* FIXME: support for sync transfers */
441 k_sem_take(&data->notif_sem, K_FOREVER);
442
443 return ret;
444 }
445
/*
 * TX handler is triggered when the state of TX fifo has been altered.
 */
static void cdc_acm_tx_fifo_handler(struct k_work *work)
{
	struct cdc_acm_uart_data *data;
	struct usbd_class_node *c_nd;
	struct net_buf *buf;
	size_t len;
	int ret;

	data = CONTAINER_OF(work, struct cdc_acm_uart_data, tx_fifo_work);
	c_nd = data->c_nd;

	if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED)) {
		LOG_DBG("USB configuration is not enabled");
		return;
	}

	if (atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
		LOG_INF("USB support is suspended (FIXME: submit rwup)");
		return;
	}

	/* The lock is shared with the poll and IRQ-callback paths; when
	 * taken, resubmit ourselves instead of blocking the work queue.
	 */
	if (atomic_test_and_set_bit(&data->state, CDC_ACM_LOCK)) {
		cdc_acm_work_submit(&data->tx_fifo_work);
		return;
	}

	buf = cdc_acm_buf_alloc(cdc_acm_get_bulk_in(c_nd));
	if (buf == NULL) {
		/* Out of pool buffers, retry later */
		cdc_acm_work_submit(&data->tx_fifo_work);
		goto tx_fifo_handler_exit;
	}

	/* Drain as much of the TX FIFO as fits into one buffer */
	len = ring_buf_get(data->tx_fifo.rb, buf->data, buf->size);
	net_buf_add(buf, len);

	ret = usbd_ep_enqueue(c_nd, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue");
		net_buf_unref(buf);
	}

tx_fifo_handler_exit:
	atomic_clear_bit(&data->state, CDC_ACM_LOCK);
}
493
494 /*
495 * RX handler should be conditionally triggered at:
496 * - (x) cdc_acm_irq_rx_enable()
497 * - (x) RX transfer completion
498 * - (x) the end of cdc_acm_irq_cb_handler
499 * - (x) USBD class API enable call
500 * - ( ) USBD class API resumed call (TODO)
501 */
cdc_acm_rx_fifo_handler(struct k_work * work)502 static void cdc_acm_rx_fifo_handler(struct k_work *work)
503 {
504 struct cdc_acm_uart_data *data;
505 struct usbd_class_node *c_nd;
506 struct net_buf *buf;
507 uint8_t ep;
508 int ret;
509
510 data = CONTAINER_OF(work, struct cdc_acm_uart_data, rx_fifo_work);
511 c_nd = data->c_nd;
512
513 if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED) ||
514 atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
515 LOG_INF("USB configuration is not enabled or suspended");
516 return;
517 }
518
519 if (ring_buf_space_get(data->rx_fifo.rb) < cdc_acm_get_bulk_mps(c_nd)) {
520 LOG_INF("RX buffer to small, throttle");
521 return;
522 }
523
524 if (atomic_test_and_set_bit(&data->state, CDC_ACM_RX_FIFO_BUSY)) {
525 LOG_WRN("RX transfer already in progress");
526 return;
527 }
528
529 ep = cdc_acm_get_bulk_out(c_nd);
530 buf = cdc_acm_buf_alloc(ep);
531 if (buf == NULL) {
532 return;
533 }
534
535 ret = usbd_ep_enqueue(c_nd, buf);
536 if (ret) {
537 LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
538 net_buf_unref(buf);
539 }
540 }
541
cdc_acm_irq_tx_enable(const struct device * dev)542 static void cdc_acm_irq_tx_enable(const struct device *dev)
543 {
544 struct cdc_acm_uart_data *const data = dev->data;
545
546 atomic_set_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED);
547
548 if (ring_buf_is_empty(data->tx_fifo.rb)) {
549 LOG_INF("tx_en: trigger irq_cb_work");
550 cdc_acm_work_submit(&data->irq_cb_work);
551 }
552 }
553
/* Disable the emulated TX interrupt; queued handlers check this bit */
static void cdc_acm_irq_tx_disable(const struct device *dev)
{
	struct cdc_acm_uart_data *const data = dev->data;

	atomic_clear_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED);
}
560
/*
 * Enable the emulated RX interrupt: run the IRQ callback when data is
 * already buffered, and (re)start the RX transfer machinery if idle.
 */
static void cdc_acm_irq_rx_enable(const struct device *dev)
{
	struct cdc_acm_uart_data *const data = dev->data;

	atomic_set_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED);

	/* Permit buffer to be drained regardless of USB state */
	if (!ring_buf_is_empty(data->rx_fifo.rb)) {
		LOG_INF("rx_en: trigger irq_cb_work");
		cdc_acm_work_submit(&data->irq_cb_work);
	}

	if (!atomic_test_bit(&data->state, CDC_ACM_RX_FIFO_BUSY)) {
		LOG_INF("rx_en: trigger rx_fifo_work");
		cdc_acm_work_submit(&data->rx_fifo_work);
	}
}
578
/* Disable the emulated RX interrupt; queued handlers check this bit */
static void cdc_acm_irq_rx_disable(const struct device *dev)
{
	struct cdc_acm_uart_data *const data = dev->data;

	atomic_clear_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED);
}
585
cdc_acm_fifo_fill(const struct device * dev,const uint8_t * const tx_data,const int len)586 static int cdc_acm_fifo_fill(const struct device *dev,
587 const uint8_t *const tx_data,
588 const int len)
589 {
590 struct cdc_acm_uart_data *const data = dev->data;
591 uint32_t done;
592
593 if (!check_wq_ctx(dev)) {
594 LOG_WRN("Invoked by inappropriate context");
595 __ASSERT_NO_MSG(false);
596 return 0;
597 }
598
599 done = ring_buf_put(data->tx_fifo.rb, tx_data, len);
600 if (done) {
601 data->tx_fifo.altered = true;
602 }
603
604 LOG_INF("UART dev %p, len %d, remaining space %u",
605 dev, len, ring_buf_space_get(data->tx_fifo.rb));
606
607 return done;
608 }
609
/*
 * UART API fifo_read: pull data out of the RX ring buffer. Must be
 * called from the class work queue context; returns the number of
 * bytes read.
 */
static int cdc_acm_fifo_read(const struct device *dev,
			     uint8_t *const rx_data,
			     const int size)
{
	struct cdc_acm_uart_data *const data = dev->data;
	uint32_t got;

	LOG_INF("UART dev %p size %d length %u",
		dev, size, ring_buf_size_get(data->rx_fifo.rb));

	if (!check_wq_ctx(dev)) {
		LOG_WRN("Invoked by inappropriate context");
		__ASSERT_NO_MSG(false);
		return 0;
	}

	got = ring_buf_get(data->rx_fifo.rb, rx_data, size);
	if (got != 0U) {
		/* Let the IRQ handler know the RX FIFO changed */
		data->rx_fifo.altered = true;
	}

	return got;
}
633
cdc_acm_irq_tx_ready(const struct device * dev)634 static int cdc_acm_irq_tx_ready(const struct device *dev)
635 {
636 struct cdc_acm_uart_data *const data = dev->data;
637
638 if (check_wq_ctx(dev)) {
639 if (ring_buf_space_get(data->tx_fifo.rb)) {
640 return 1;
641 }
642 } else {
643 LOG_WRN("Invoked by inappropriate context");
644 __ASSERT_NO_MSG(false);
645 }
646
647 return 0;
648 }
649
cdc_acm_irq_rx_ready(const struct device * dev)650 static int cdc_acm_irq_rx_ready(const struct device *dev)
651 {
652 struct cdc_acm_uart_data *const data = dev->data;
653
654 if (check_wq_ctx(dev)) {
655 if (!ring_buf_is_empty(data->rx_fifo.rb)) {
656 return 1;
657 }
658 } else {
659 LOG_WRN("Invoked by inappropriate context");
660 __ASSERT_NO_MSG(false);
661 }
662
663
664 return 0;
665 }
666
cdc_acm_irq_is_pending(const struct device * dev)667 static int cdc_acm_irq_is_pending(const struct device *dev)
668 {
669 struct cdc_acm_uart_data *const data = dev->data;
670
671 if (check_wq_ctx(dev)) {
672 if (data->tx_fifo.irq || data->rx_fifo.irq) {
673 return 1;
674 }
675 } else {
676 LOG_WRN("Invoked by inappropriate context");
677 __ASSERT_NO_MSG(false);
678 }
679
680 return 0;
681 }
682
cdc_acm_irq_update(const struct device * dev)683 static int cdc_acm_irq_update(const struct device *dev)
684 {
685 struct cdc_acm_uart_data *const data = dev->data;
686
687 if (!check_wq_ctx(dev)) {
688 LOG_WRN("Invoked by inappropriate context");
689 __ASSERT_NO_MSG(false);
690 return 0;
691 }
692
693 if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) &&
694 !ring_buf_is_empty(data->rx_fifo.rb)) {
695 data->rx_fifo.irq = true;
696 } else {
697 data->rx_fifo.irq = false;
698 }
699
700 if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED) &&
701 ring_buf_is_empty(data->tx_fifo.rb)) {
702 data->tx_fifo.irq = true;
703 } else {
704 data->tx_fifo.irq = false;
705 }
706
707 return 1;
708 }
709
/*
 * IRQ handler should be conditionally triggered for the TX path at:
 * - cdc_acm_irq_tx_enable()
 * - TX transfer completion
 * - TX buffer is empty
 * - USBD class API enable and resumed calls
 *
 * for RX path, if enabled, at:
 * - cdc_acm_irq_rx_enable()
 * - RX transfer completion
 * - RX buffer is not empty
 */
static void cdc_acm_irq_cb_handler(struct k_work *work)
{
	struct cdc_acm_uart_data *data;
	struct usbd_class_node *c_nd;

	data = CONTAINER_OF(work, struct cdc_acm_uart_data, irq_cb_work);
	c_nd = data->c_nd;

	if (data->cb == NULL) {
		LOG_ERR("IRQ callback is not set");
		return;
	}

	/* The lock is shared with the poll and FIFO worker paths;
	 * resubmit ourselves instead of blocking the work queue.
	 */
	if (atomic_test_and_set_bit(&data->state, CDC_ACM_LOCK)) {
		LOG_ERR("Polling is in progress");
		cdc_acm_work_submit(&data->irq_cb_work);
		return;
	}

	/* Reset bookkeeping before the application callback runs; the
	 * callback reports FIFO activity through the altered flags set
	 * by fifo_fill()/fifo_read().
	 */
	data->tx_fifo.altered = false;
	data->rx_fifo.altered = false;
	data->rx_fifo.irq = false;
	data->tx_fifo.irq = false;

	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) ||
	    atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED)) {
		data->cb(c_nd->data->priv, data->cb_data);
	}

	if (data->rx_fifo.altered) {
		LOG_DBG("rx fifo altered, submit work");
		cdc_acm_work_submit(&data->rx_fifo_work);
	}

	if (data->tx_fifo.altered) {
		LOG_DBG("tx fifo altered, submit work");
		cdc_acm_work_submit(&data->tx_fifo_work);
	}

	/* Re-run while an emulated interrupt condition persists */
	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) &&
	    !ring_buf_is_empty(data->rx_fifo.rb)) {
		LOG_DBG("rx irq pending, submit irq_cb_work");
		cdc_acm_work_submit(&data->irq_cb_work);
	}

	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED) &&
	    ring_buf_is_empty(data->tx_fifo.rb)) {
		LOG_DBG("tx irq pending, submit irq_cb_work");
		cdc_acm_work_submit(&data->irq_cb_work);
	}

	atomic_clear_bit(&data->state, CDC_ACM_LOCK);
}
775
/* Register the UART IRQ callback executed from the class work queue */
static void cdc_acm_irq_callback_set(const struct device *dev,
				     const uart_irq_callback_user_data_t cb,
				     void *const cb_data)
{
	struct cdc_acm_uart_data *const data = dev->data;

	data->cb = cb;
	data->cb_data = cb_data;
}
785
cdc_acm_poll_in(const struct device * dev,unsigned char * const c)786 static int cdc_acm_poll_in(const struct device *dev, unsigned char *const c)
787 {
788 struct cdc_acm_uart_data *const data = dev->data;
789 uint32_t len;
790 int ret = -1;
791
792 if (atomic_test_and_set_bit(&data->state, CDC_ACM_LOCK)) {
793 LOG_ERR("IRQ callback is used");
794 return -1;
795 }
796
797 if (ring_buf_is_empty(data->rx_fifo.rb)) {
798 goto poll_in_exit;
799 }
800
801 len = ring_buf_get(data->rx_fifo.rb, c, 1);
802 if (len) {
803 cdc_acm_work_submit(&data->rx_fifo_work);
804 ret = 0;
805 }
806
807 poll_in_exit:
808 atomic_clear_bit(&data->state, CDC_ACM_LOCK);
809
810 return ret;
811 }
812
/*
 * UART API poll_out: append one byte to the TX ring buffer and kick
 * the TX worker. When the buffer is full, the oldest byte is dropped
 * to make room. The byte is silently lost if the lock is contended.
 */
static void cdc_acm_poll_out(const struct device *dev, const unsigned char c)
{
	struct cdc_acm_uart_data *const data = dev->data;

	if (atomic_test_and_set_bit(&data->state, CDC_ACM_LOCK)) {
		LOG_ERR("IRQ callback is used");
		return;
	}

	if (ring_buf_put(data->tx_fifo.rb, &c, 1)) {
		goto poll_out_exit;
	}

	/* FIFO full: discard the oldest byte, then retry the put */
	LOG_DBG("Ring buffer full, drain buffer");
	if (!ring_buf_get(data->tx_fifo.rb, NULL, 1) ||
	    !ring_buf_put(data->tx_fifo.rb, &c, 1)) {
		LOG_ERR("Failed to drain buffer");
		__ASSERT_NO_MSG(false);
	}

poll_out_exit:
	atomic_clear_bit(&data->state, CDC_ACM_LOCK);
	cdc_acm_work_submit(&data->tx_fifo_work);
}
837
838 #ifdef CONFIG_UART_LINE_CTRL
cdc_acm_line_ctrl_set(const struct device * dev,const uint32_t ctrl,const uint32_t val)839 static int cdc_acm_line_ctrl_set(const struct device *dev,
840 const uint32_t ctrl, const uint32_t val)
841 {
842 struct cdc_acm_uart_data *const data = dev->data;
843 uint32_t flag = 0;
844
845 switch (ctrl) {
846 case USB_CDC_LINE_CTRL_BAUD_RATE:
847 /* Ignore since it can not be used for notification anyway */
848 return 0;
849 case USB_CDC_LINE_CTRL_DCD:
850 flag = USB_CDC_SERIAL_STATE_RXCARRIER;
851 break;
852 case USB_CDC_LINE_CTRL_DSR:
853 flag = USB_CDC_SERIAL_STATE_TXCARRIER;
854 break;
855 case USB_CDC_LINE_CTRL_BREAK:
856 flag = USB_CDC_SERIAL_STATE_BREAK;
857 break;
858 case USB_CDC_LINE_CTRL_RING_SIGNAL:
859 flag = USB_CDC_SERIAL_STATE_RINGSIGNAL;
860 break;
861 case USB_CDC_LINE_CTRL_FRAMING:
862 flag = USB_CDC_SERIAL_STATE_FRAMING;
863 break;
864 case USB_CDC_LINE_CTRL_PARITY:
865 flag = USB_CDC_SERIAL_STATE_PARITY;
866 break;
867 case USB_CDC_LINE_CTRL_OVER_RUN:
868 flag = USB_CDC_SERIAL_STATE_OVERRUN;
869 break;
870 default:
871 return -EINVAL;
872 }
873
874 if (val) {
875 data->serial_state |= flag;
876 } else {
877 data->serial_state &= ~flag;
878 }
879
880 return cdc_acm_send_notification(dev, data->serial_state);
881 }
882
/*
 * UART API line_ctrl_get: report the emulated baud rate and the RTS
 * and DTR states last set by the host. Unknown controls yield
 * -ENOTSUP.
 */
static int cdc_acm_line_ctrl_get(const struct device *dev,
				 const uint32_t ctrl, uint32_t *const val)
{
	struct cdc_acm_uart_data *const data = dev->data;

	switch (ctrl) {
	case UART_LINE_CTRL_BAUD_RATE:
		*val = data->uart_cfg.baudrate;
		return 0;
	case UART_LINE_CTRL_RTS:
		*val = data->line_state_rts;
		return 0;
	case UART_LINE_CTRL_DTR:
		*val = data->line_state_dtr;
		return 0;
	default:
		return -ENOTSUP;
	}
}
902 #endif
903
904 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int cdc_acm_configure(const struct device *dev,
			     const struct uart_config *const cfg)
{
	/*
	 * Accept and ignore any runtime configuration. The Abstract
	 * Control Model provides no way to notify the host about
	 * configuration changes and the UART controller is only
	 * emulated, but providing this API keeps subsystems such as
	 * Modbus, which require configure support from real
	 * controllers, working on top of CDC ACM UART.
	 */
	ARG_UNUSED(dev);
	ARG_UNUSED(cfg);

	return 0;
}
922
cdc_acm_config_get(const struct device * dev,struct uart_config * const cfg)923 static int cdc_acm_config_get(const struct device *dev,
924 struct uart_config *const cfg)
925 {
926 struct cdc_acm_uart_data *const data = dev->data;
927
928 memcpy(cfg, &data->uart_cfg, sizeof(struct uart_config));
929
930 return 0;
931 }
932 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
933
/*
 * Start the work queue shared by all CDC ACM instances; invoked once
 * via SYS_INIT.
 */
static int usbd_cdc_acm_init_wq(void)
{
	k_work_queue_init(&cdc_acm_work_q);
	k_work_queue_start(&cdc_acm_work_q, cdc_acm_stack,
			   K_KERNEL_STACK_SIZEOF(cdc_acm_stack),
			   CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL);

	return 0;
}
943
/*
 * Per-instance device init: reset both FIFOs and initialize the work
 * items used for deferred RX/TX and IRQ-callback processing.
 */
static int usbd_cdc_acm_preinit(const struct device *dev)
{
	struct cdc_acm_uart_data *const data = dev->data;

	ring_buf_reset(data->tx_fifo.rb);
	ring_buf_reset(data->rx_fifo.rb);

	k_thread_name_set(&cdc_acm_work_q.thread, "cdc_acm_work_q");

	k_work_init(&data->tx_fifo_work, cdc_acm_tx_fifo_handler);
	k_work_init(&data->rx_fifo_work, cdc_acm_rx_fifo_handler);
	k_work_init(&data->irq_cb_work, cdc_acm_irq_cb_handler);

	return 0;
}
959
/* UART driver API backed by the emulated CDC ACM controller */
static const struct uart_driver_api cdc_acm_uart_api = {
	.irq_tx_enable = cdc_acm_irq_tx_enable,
	.irq_tx_disable = cdc_acm_irq_tx_disable,
	.irq_tx_ready = cdc_acm_irq_tx_ready,
	.irq_rx_enable = cdc_acm_irq_rx_enable,
	.irq_rx_disable = cdc_acm_irq_rx_disable,
	.irq_rx_ready = cdc_acm_irq_rx_ready,
	.irq_is_pending = cdc_acm_irq_is_pending,
	.irq_update = cdc_acm_irq_update,
	.irq_callback_set = cdc_acm_irq_callback_set,
	.poll_in = cdc_acm_poll_in,
	.poll_out = cdc_acm_poll_out,
	.fifo_fill = cdc_acm_fifo_fill,
	.fifo_read = cdc_acm_fifo_read,
#ifdef CONFIG_UART_LINE_CTRL
	.line_ctrl_set = cdc_acm_line_ctrl_set,
	.line_ctrl_get = cdc_acm_line_ctrl_get,
#endif
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = cdc_acm_configure,
	.config_get = cdc_acm_config_get,
#endif
};
983
/* USBD class driver hooks for the CDC ACM function */
struct usbd_class_api usbd_cdc_acm_api = {
	.request = usbd_cdc_acm_request,
	.update = usbd_cdc_acm_update,
	.enable = usbd_cdc_acm_enable,
	.disable = usbd_cdc_acm_disable,
	.suspended = usbd_cdc_acm_suspended,
	.resumed = usbd_cdc_acm_resumed,
	.control_to_host = usbd_cdc_acm_cth,
	.control_to_dev = usbd_cdc_acm_ctd,
	.init = usbd_cdc_acm_init,
};
995
/*
 * Initializer for a usbd_cdc_acm_desc instance: IAD plus the
 * communication interface (header, call management, ACM and union
 * functional descriptors, interrupt IN endpoint) and the data
 * interface with bulk IN/OUT endpoints. Interface numbers are fixed
 * up at runtime by usbd_cdc_acm_init().
 */
#define CDC_ACM_DEFINE_DESCRIPTOR(n)						\
static struct usbd_cdc_acm_desc cdc_acm_desc_##n = {				\
	.iad_cdc = {								\
		.bLength = sizeof(struct usb_association_descriptor),		\
		.bDescriptorType = USB_DESC_INTERFACE_ASSOC,			\
		.bFirstInterface = 0,						\
		.bInterfaceCount = 0x02,					\
		.bFunctionClass = USB_BCC_CDC_CONTROL,				\
		.bFunctionSubClass = ACM_SUBCLASS,				\
		.bFunctionProtocol = 0,						\
		.iFunction = 0,							\
	},									\
										\
	.if0 = {								\
		.bLength = sizeof(struct usb_if_descriptor),			\
		.bDescriptorType = USB_DESC_INTERFACE,				\
		.bInterfaceNumber = 0,						\
		.bAlternateSetting = 0,						\
		.bNumEndpoints = 1,						\
		.bInterfaceClass = USB_BCC_CDC_CONTROL,				\
		.bInterfaceSubClass = ACM_SUBCLASS,				\
		.bInterfaceProtocol = 0,					\
		.iInterface = 0,						\
	},									\
										\
	.if0_header = {								\
		.bFunctionLength = sizeof(struct cdc_header_descriptor),	\
		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
		.bDescriptorSubtype = HEADER_FUNC_DESC,				\
		.bcdCDC = sys_cpu_to_le16(USB_SRN_1_1),				\
	},									\
										\
	.if0_cm = {								\
		.bFunctionLength = sizeof(struct cdc_cm_descriptor),		\
		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
		.bDescriptorSubtype = CALL_MANAGEMENT_FUNC_DESC,		\
		.bmCapabilities = 0,						\
		.bDataInterface = 1,						\
	},									\
										\
	.if0_acm = {								\
		.bFunctionLength = sizeof(struct cdc_acm_descriptor),		\
		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
		.bDescriptorSubtype = ACM_FUNC_DESC,				\
		/* See CDC PSTN Subclass Chapter 5.3.2 */			\
		.bmCapabilities = BIT(1),					\
	},									\
										\
	.if0_union = {								\
		.bFunctionLength = sizeof(struct cdc_union_descriptor),		\
		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
		.bDescriptorSubtype = UNION_FUNC_DESC,				\
		.bControlInterface = 0,						\
		.bSubordinateInterface0 = 1,					\
	},									\
										\
	.if0_int_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x81,					\
		.bmAttributes = USB_EP_TYPE_INTERRUPT,				\
		.wMaxPacketSize = sys_cpu_to_le16(CDC_ACM_DEFAULT_INT_EP_MPS),	\
		.bInterval = CDC_ACM_DEFAULT_INT_INTERVAL,			\
	},									\
										\
	.if1 = {								\
		.bLength = sizeof(struct usb_if_descriptor),			\
		.bDescriptorType = USB_DESC_INTERFACE,				\
		.bInterfaceNumber = 1,						\
		.bAlternateSetting = 0,						\
		.bNumEndpoints = 2,						\
		.bInterfaceClass = USB_BCC_CDC_DATA,				\
		.bInterfaceSubClass = 0,					\
		.bInterfaceProtocol = 0,					\
		.iInterface = 0,						\
	},									\
										\
	.if1_in_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x82,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(CDC_ACM_DEFAULT_BULK_EP_MPS),	\
		.bInterval = 0,							\
	},									\
										\
	.if1_out_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x01,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(CDC_ACM_DEFAULT_BULK_EP_MPS),	\
		.bInterval = 0,							\
	},									\
										\
	.nil_desc = {								\
		.bLength = 0,							\
		.bDescriptorType = 0,						\
	},									\
}
1096
/*
 * Define a CDC ACM UART device for devicetree instance n: descriptor,
 * class node/data, RX/TX ring buffers and the device instance itself.
 *
 * Fix: the RX ring buffer was sized from the tx-fifo-size property;
 * it must use rx-fifo-size so both devicetree properties take effect.
 */
#define USBD_CDC_ACM_DT_DEVICE_DEFINE(n)					\
	BUILD_ASSERT(DT_INST_ON_BUS(n, usb),					\
		     "node " DT_NODE_PATH(DT_DRV_INST(n))			\
		     " is not assigned to a USB device controller");		\
										\
	CDC_ACM_DEFINE_DESCRIPTOR(n);						\
										\
	static struct usbd_class_data usbd_cdc_acm_data_##n;			\
										\
	USBD_DEFINE_CLASS(cdc_acm_##n,						\
			  &usbd_cdc_acm_api,					\
			  &usbd_cdc_acm_data_##n);				\
										\
	RING_BUF_DECLARE(cdc_acm_rb_rx_##n, DT_INST_PROP(n, rx_fifo_size));	\
	RING_BUF_DECLARE(cdc_acm_rb_tx_##n, DT_INST_PROP(n, tx_fifo_size));	\
										\
	static struct cdc_acm_uart_data uart_data_##n = {			\
		.line_coding = CDC_ACM_DEFAULT_LINECODING,			\
		.c_nd = &cdc_acm_##n,						\
		.rx_fifo.rb = &cdc_acm_rb_rx_##n,				\
		.tx_fifo.rb = &cdc_acm_rb_tx_##n,				\
		.notif_sem = Z_SEM_INITIALIZER(uart_data_##n.notif_sem, 0, 1),	\
	};									\
										\
	static struct usbd_class_data usbd_cdc_acm_data_##n = {			\
		.desc = (struct usb_desc_header *)&cdc_acm_desc_##n,		\
		.priv = (void *)DEVICE_DT_GET(DT_DRV_INST(n)),			\
	};									\
										\
	DEVICE_DT_INST_DEFINE(n, usbd_cdc_acm_preinit, NULL,			\
			      &uart_data_##n, NULL,				\
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,	\
			      &cdc_acm_uart_api);
1130
/* Instantiate a CDC ACM UART device for every enabled DT instance */
DT_INST_FOREACH_STATUS_OKAY(USBD_CDC_ACM_DT_DEVICE_DEFINE);

/* The shared work queue can only be started once the kernel is up */
SYS_INIT(usbd_cdc_acm_init_wq, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
1134