1 /*
2  * Copyright (c) 2022 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT zephyr_cdc_acm_uart
8 
9 #include <zephyr/init.h>
10 #include <zephyr/kernel.h>
11 #include <zephyr/drivers/uart.h>
12 #include <zephyr/sys/ring_buffer.h>
13 #include <zephyr/sys/byteorder.h>
14 
15 #include <zephyr/usb/usbd.h>
16 #include <zephyr/usb/usb_ch9.h>
17 #include <zephyr/usb/class/usb_cdc.h>
18 
19 #include <zephyr/drivers/usb/udc.h>
20 
21 #include "usbd_msg.h"
22 
23 #include <zephyr/logging/log.h>
24 /* Prevent endless recursive logging loop and warn user about it */
25 #if defined(CONFIG_USBD_CDC_ACM_LOG_LEVEL) && CONFIG_USBD_CDC_ACM_LOG_LEVEL != LOG_LEVEL_NONE
26 #define CHOSEN_CONSOLE DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_console), zephyr_cdc_acm_uart)
27 #define CHOSEN_SHELL   DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_shell_uart), zephyr_cdc_acm_uart)
28 #if (CHOSEN_CONSOLE && defined(CONFIG_LOG_BACKEND_UART)) || \
29 	(CHOSEN_SHELL && defined(CONFIG_SHELL_LOG_BACKEND))
30 #warning "USBD_CDC_ACM_LOG_LEVEL forced to LOG_LEVEL_NONE"
31 #undef CONFIG_USBD_CDC_ACM_LOG_LEVEL
32 #define CONFIG_USBD_CDC_ACM_LOG_LEVEL LOG_LEVEL_NONE
33 #endif
34 #endif
35 LOG_MODULE_REGISTER(usbd_cdc_acm, CONFIG_USBD_CDC_ACM_LOG_LEVEL);
36 
37 UDC_BUF_POOL_DEFINE(cdc_acm_ep_pool,
38 		    DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
39 		    512, sizeof(struct udc_buf_info), NULL);
40 
41 #define CDC_ACM_DEFAULT_LINECODING	{sys_cpu_to_le32(115200), 0, 0, 8}
42 #define CDC_ACM_DEFAULT_INT_EP_MPS	16
43 #define CDC_ACM_INTERVAL_DEFAULT	10000UL
44 #define CDC_ACM_FS_INT_EP_INTERVAL	USB_FS_INT_EP_INTERVAL(10000U)
45 #define CDC_ACM_HS_INT_EP_INTERVAL	USB_HS_INT_EP_INTERVAL(10000U)
46 
47 #define CDC_ACM_CLASS_ENABLED		0
48 #define CDC_ACM_CLASS_SUSPENDED		1
49 #define CDC_ACM_IRQ_RX_ENABLED		2
50 #define CDC_ACM_IRQ_TX_ENABLED		3
51 #define CDC_ACM_RX_FIFO_BUSY		4
52 #define CDC_ACM_TX_FIFO_BUSY		5
53 
54 static struct k_work_q cdc_acm_work_q;
55 static K_KERNEL_STACK_DEFINE(cdc_acm_stack,
56 			     CONFIG_USBD_CDC_ACM_STACK_SIZE);
57 
58 struct cdc_acm_uart_fifo {
59 	struct ring_buf *rb;
60 	bool irq;
61 	bool altered;
62 };
63 
64 struct usbd_cdc_acm_desc {
65 	struct usb_association_descriptor iad;
66 	struct usb_if_descriptor if0;
67 	struct cdc_header_descriptor if0_header;
68 	struct cdc_cm_descriptor if0_cm;
69 	struct cdc_acm_descriptor if0_acm;
70 	struct cdc_union_descriptor if0_union;
71 	struct usb_ep_descriptor if0_int_ep;
72 	struct usb_ep_descriptor if0_hs_int_ep;
73 
74 	struct usb_if_descriptor if1;
75 	struct usb_ep_descriptor if1_in_ep;
76 	struct usb_ep_descriptor if1_out_ep;
77 	struct usb_ep_descriptor if1_hs_in_ep;
78 	struct usb_ep_descriptor if1_hs_out_ep;
79 
80 	struct usb_desc_header nil_desc;
81 };
82 
83 struct cdc_acm_uart_data {
84 	/* Pointer to the associated USBD class node */
85 	struct usbd_class_data *c_data;
86 	/* Pointer to the class interface descriptors */
87 	struct usbd_cdc_acm_desc *const desc;
88 	const struct usb_desc_header **const fs_desc;
89 	const struct usb_desc_header **const hs_desc;
90 	/* Line Coding Structure */
91 	struct cdc_acm_line_coding line_coding;
92 	/* SetControlLineState bitmap */
93 	uint16_t line_state;
94 	/* Serial state bitmap */
95 	uint16_t serial_state;
96 	/* UART actual configuration */
97 	struct uart_config uart_cfg;
98 	/* UART actual RTS state */
99 	bool line_state_rts;
100 	/* UART actual DTR state */
101 	bool line_state_dtr;
102 	/* UART API IRQ callback */
103 	uart_irq_callback_user_data_t cb;
104 	/* UART API user callback data */
105 	void *cb_data;
106 	/* UART API IRQ callback work */
107 	struct k_work irq_cb_work;
108 	struct cdc_acm_uart_fifo rx_fifo;
109 	struct cdc_acm_uart_fifo tx_fifo;
110 	/* When flow_ctrl is set, poll out is blocked when the buffer is full,
111 	 * roughly emulating flow control.
112 	 */
113 	bool flow_ctrl;
114 	/* USBD CDC ACM TX fifo work */
115 	struct k_work_delayable tx_fifo_work;
116 	/* USBD CDC ACM RX fifo work */
117 	struct k_work rx_fifo_work;
118 	atomic_t state;
119 	struct k_sem notif_sem;
120 };
121 
122 static void cdc_acm_irq_rx_enable(const struct device *dev);
123 
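/* Allocate a buffer from the class endpoint pool and tag it with the
 * endpoint address it is intended for. Returns NULL if the pool is empty.
 */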
124 struct net_buf *cdc_acm_buf_alloc(const uint8_t ep)
125 {
126 	struct net_buf *buf = NULL;
127 	struct udc_buf_info *bi;
128 
129 	buf = net_buf_alloc(&cdc_acm_ep_pool, K_NO_WAIT);
130 	if (!buf) {
131 		return NULL;
132 	}
133 
134 	bi = udc_get_buf_info(buf);
135 	bi->ep = ep;
136 
137 	return buf;
138 }
139 
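/* All deferred work is submitted to the dedicated CDC ACM work queue so
 * that the UART IRQ callback always runs in a single, known thread context.
 */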
140 static ALWAYS_INLINE int cdc_acm_work_submit(struct k_work *work)
141 {
142 	return k_work_submit_to_queue(&cdc_acm_work_q, work);
143 }
144 
145 static ALWAYS_INLINE int cdc_acm_work_schedule(struct k_work_delayable *work,
146 					       k_timeout_t delay)
147 {
148 	return k_work_schedule_for_queue(&cdc_acm_work_q, work, delay);
149 }
150 
151 static ALWAYS_INLINE bool check_wq_ctx(const struct device *dev)
152 {
153 	return k_current_get() == k_work_queue_thread_get(&cdc_acm_work_q);
154 }
155 
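/* Endpoint address and MPS helpers select the full-speed or high-speed
 * descriptor values depending on the actual bus speed.
 */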
156 static uint8_t cdc_acm_get_int_in(struct usbd_class_data *const c_data)
157 {
158 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
159 	const struct device *dev = usbd_class_get_private(c_data);
160 	struct cdc_acm_uart_data *data = dev->data;
161 	struct usbd_cdc_acm_desc *desc = data->desc;
162 
163 	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
164 		return desc->if0_hs_int_ep.bEndpointAddress;
165 	}
166 
167 	return desc->if0_int_ep.bEndpointAddress;
168 }
169 
170 static uint8_t cdc_acm_get_bulk_in(struct usbd_class_data *const c_data)
171 {
172 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
173 	const struct device *dev = usbd_class_get_private(c_data);
174 	struct cdc_acm_uart_data *data = dev->data;
175 	struct usbd_cdc_acm_desc *desc = data->desc;
176 
177 	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
178 		return desc->if1_hs_in_ep.bEndpointAddress;
179 	}
180 
181 	return desc->if1_in_ep.bEndpointAddress;
182 }
183 
184 static uint8_t cdc_acm_get_bulk_out(struct usbd_class_data *const c_data)
185 {
186 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
187 	const struct device *dev = usbd_class_get_private(c_data);
188 	struct cdc_acm_uart_data *data = dev->data;
189 	struct usbd_cdc_acm_desc *desc = data->desc;
190 
191 	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
192 		return desc->if1_hs_out_ep.bEndpointAddress;
193 	}
194 
195 	return desc->if1_out_ep.bEndpointAddress;
196 }
197 
198 static size_t cdc_acm_get_bulk_mps(struct usbd_class_data *const c_data)
199 {
200 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
201 
202 	if (usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
203 		return 512U;
204 	}
205 
206 	return 64U;
207 }
208 
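/* Transfer completion handler: copies received data into the RX ring buffer,
 * triggers the UART IRQ callback work, and restarts RX/TX transfers as needed.
 */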
209 static int usbd_cdc_acm_request(struct usbd_class_data *const c_data,
210 				struct net_buf *buf, int err)
211 {
212 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
213 	const struct device *dev = usbd_class_get_private(c_data);
214 	struct cdc_acm_uart_data *data = dev->data;
215 	struct udc_buf_info *bi;
216 
217 	bi = udc_get_buf_info(buf);
218 	if (err) {
219 		if (err == -ECONNABORTED) {
220 			LOG_WRN("request ep 0x%02x, len %u cancelled",
221 				bi->ep, buf->len);
222 		} else {
223 			LOG_ERR("request ep 0x%02x, len %u failed",
224 				bi->ep, buf->len);
225 		}
226 
227 		if (bi->ep == cdc_acm_get_bulk_out(c_data)) {
228 			atomic_clear_bit(&data->state, CDC_ACM_RX_FIFO_BUSY);
229 		}
230 
231 		if (bi->ep == cdc_acm_get_bulk_in(c_data)) {
232 			atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
233 		}
234 
235 		goto ep_request_error;
236 	}
237 
238 	if (bi->ep == cdc_acm_get_bulk_out(c_data)) {
239 		/* RX transfer completion */
240 		size_t done;
241 
242 		LOG_HEXDUMP_INF(buf->data, buf->len, "");
243 		done = ring_buf_put(data->rx_fifo.rb, buf->data, buf->len);
244 		if (done && data->cb) {
245 			cdc_acm_work_submit(&data->irq_cb_work);
246 		}
247 
248 		atomic_clear_bit(&data->state, CDC_ACM_RX_FIFO_BUSY);
249 		cdc_acm_work_submit(&data->rx_fifo_work);
250 	}
251 
252 	if (bi->ep == cdc_acm_get_bulk_in(c_data)) {
253 		/* TX transfer completion */
254 		if (data->cb) {
255 			cdc_acm_work_submit(&data->irq_cb_work);
256 		}
257 
258 		atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
259 
260 		if (!ring_buf_is_empty(data->tx_fifo.rb)) {
261 			/* Queue pending TX data on IN endpoint */
262 			cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
263 		}
264 
265 	}
266 
267 	if (bi->ep == cdc_acm_get_int_in(c_data)) {
268 		k_sem_give(&data->notif_sem);
269 	}
270 
271 ep_request_error:
272 	return usbd_ep_buf_free(uds_ctx, buf);
273 }
274 
275 static void usbd_cdc_acm_update(struct usbd_class_data *const c_data,
276 				uint8_t iface, uint8_t alternate)
277 {
278 	LOG_DBG("New configuration, interface %u alternate %u",
279 		iface, alternate);
280 }
281 
282 static void usbd_cdc_acm_enable(struct usbd_class_data *const c_data)
283 {
284 	const struct device *dev = usbd_class_get_private(c_data);
285 	struct cdc_acm_uart_data *data = dev->data;
286 
287 	atomic_set_bit(&data->state, CDC_ACM_CLASS_ENABLED);
288 	LOG_INF("Configuration enabled");
289 
290 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED)) {
291 		cdc_acm_irq_rx_enable(dev);
292 	}
293 
294 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED)) {
295 		if (ring_buf_space_get(data->tx_fifo.rb)) {
296 			/* Raise TX ready interrupt */
297 			cdc_acm_work_submit(&data->irq_cb_work);
298 		} else {
299 			/* Queue pending TX data on IN endpoint */
300 			cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
301 		}
302 	}
303 }
304 
305 static void usbd_cdc_acm_disable(struct usbd_class_data *const c_data)
306 {
307 	const struct device *dev = usbd_class_get_private(c_data);
308 	struct cdc_acm_uart_data *data = dev->data;
309 
310 	atomic_clear_bit(&data->state, CDC_ACM_CLASS_ENABLED);
311 	atomic_clear_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
312 	LOG_INF("Configuration disabled");
313 }
314 
315 static void usbd_cdc_acm_suspended(struct usbd_class_data *const c_data)
316 {
317 	const struct device *dev = usbd_class_get_private(c_data);
318 	struct cdc_acm_uart_data *data = dev->data;
319 
320 	/* FIXME: filter stray suspended events earlier */
321 	atomic_set_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
322 }
323 
324 static void usbd_cdc_acm_resumed(struct usbd_class_data *const c_data)
325 {
326 	const struct device *dev = usbd_class_get_private(c_data);
327 	struct cdc_acm_uart_data *data = dev->data;
328 
329 	atomic_clear_bit(&data->state, CDC_ACM_CLASS_SUSPENDED);
330 }
331 
332 static void *usbd_cdc_acm_get_desc(struct usbd_class_data *const c_data,
333 				   const enum usbd_speed speed)
334 {
335 	const struct device *dev = usbd_class_get_private(c_data);
336 	struct cdc_acm_uart_data *data = dev->data;
337 
338 	if (speed == USBD_SPEED_HS) {
339 		return data->hs_desc;
340 	}
341 
342 	return data->fs_desc;
343 }
344 
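/* Translate the line coding received from the host into uart_config values. */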
345 static void cdc_acm_update_uart_cfg(struct cdc_acm_uart_data *const data)
346 {
347 	struct uart_config *const cfg = &data->uart_cfg;
348 
349 	cfg->baudrate = sys_le32_to_cpu(data->line_coding.dwDTERate);
350 
351 	switch (data->line_coding.bCharFormat) {
352 	case USB_CDC_LINE_CODING_STOP_BITS_1:
353 		cfg->stop_bits = UART_CFG_STOP_BITS_1;
354 		break;
355 	case USB_CDC_LINE_CODING_STOP_BITS_1_5:
356 		cfg->stop_bits = UART_CFG_STOP_BITS_1_5;
357 		break;
358 	case USB_CDC_LINE_CODING_STOP_BITS_2:
359 	default:
360 		cfg->stop_bits = UART_CFG_STOP_BITS_2;
361 		break;
362 	}
363 
364 	switch (data->line_coding.bParityType) {
365 	case USB_CDC_LINE_CODING_PARITY_NO:
366 	default:
367 		cfg->parity = UART_CFG_PARITY_NONE;
368 		break;
369 	case USB_CDC_LINE_CODING_PARITY_ODD:
370 		cfg->parity = UART_CFG_PARITY_ODD;
371 		break;
372 	case USB_CDC_LINE_CODING_PARITY_EVEN:
373 		cfg->parity = UART_CFG_PARITY_EVEN;
374 		break;
375 	case USB_CDC_LINE_CODING_PARITY_MARK:
376 		cfg->parity = UART_CFG_PARITY_MARK;
377 		break;
378 	case USB_CDC_LINE_CODING_PARITY_SPACE:
379 		cfg->parity = UART_CFG_PARITY_SPACE;
380 		break;
381 	}
382 
383 	switch (data->line_coding.bDataBits) {
384 	case USB_CDC_LINE_CODING_DATA_BITS_5:
385 		cfg->data_bits = UART_CFG_DATA_BITS_5;
386 		break;
387 	case USB_CDC_LINE_CODING_DATA_BITS_6:
388 		cfg->data_bits = UART_CFG_DATA_BITS_6;
389 		break;
390 	case USB_CDC_LINE_CODING_DATA_BITS_7:
391 		cfg->data_bits = UART_CFG_DATA_BITS_7;
392 		break;
393 	case USB_CDC_LINE_CODING_DATA_BITS_8:
394 	default:
395 		cfg->data_bits = UART_CFG_DATA_BITS_8;
396 		break;
397 	}
398 
399 	cfg->flow_ctrl = data->flow_ctrl ? UART_CFG_FLOW_CTRL_RTS_CTS :
400 					   UART_CFG_FLOW_CTRL_NONE;
401 }
402 
403 static void cdc_acm_update_linestate(struct cdc_acm_uart_data *const data)
404 {
405 	if (data->line_state & SET_CONTROL_LINE_STATE_RTS) {
406 		data->line_state_rts = true;
407 	} else {
408 		data->line_state_rts = false;
409 	}
410 
411 	if (data->line_state & SET_CONTROL_LINE_STATE_DTR) {
412 		data->line_state_dtr = true;
413 	} else {
414 		data->line_state_dtr = false;
415 	}
416 }
417 
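/* Control transfer to host: only GET_LINE_CODING is supported. */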
418 static int usbd_cdc_acm_cth(struct usbd_class_data *const c_data,
419 			    const struct usb_setup_packet *const setup,
420 			    struct net_buf *const buf)
421 {
422 	const struct device *dev = usbd_class_get_private(c_data);
423 	struct cdc_acm_uart_data *data = dev->data;
424 	size_t min_len;
425 
426 	if (setup->bRequest == GET_LINE_CODING) {
427 		if (buf == NULL) {
428 			errno = -ENOMEM;
429 			return 0;
430 		}
431 
432 		min_len = MIN(sizeof(data->line_coding), setup->wLength);
433 		net_buf_add_mem(buf, &data->line_coding, min_len);
434 
435 		return 0;
436 	}
437 
438 	LOG_DBG("bmRequestType 0x%02x bRequest 0x%02x unsupported",
439 		setup->bmRequestType, setup->bRequest);
440 	errno = -ENOTSUP;
441 
442 	return 0;
443 }
444 
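/* Control transfer to device: SET_LINE_CODING and SET_CONTROL_LINE_STATE
 * update the cached line coding and control line state and notify the
 * application through USBD messages.
 */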
445 static int usbd_cdc_acm_ctd(struct usbd_class_data *const c_data,
446 			    const struct usb_setup_packet *const setup,
447 			    const struct net_buf *const buf)
448 {
449 	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
450 	const struct device *dev = usbd_class_get_private(c_data);
451 	struct cdc_acm_uart_data *data = dev->data;
452 	size_t len;
453 
454 	switch (setup->bRequest) {
455 	case SET_LINE_CODING:
456 		len = sizeof(data->line_coding);
457 		if (setup->wLength != len) {
458 			errno = -ENOTSUP;
459 			return 0;
460 		}
461 
462 		memcpy(&data->line_coding, buf->data, len);
463 		cdc_acm_update_uart_cfg(data);
464 		usbd_msg_pub_device(uds_ctx, USBD_MSG_CDC_ACM_LINE_CODING, dev);
465 		return 0;
466 
467 	case SET_CONTROL_LINE_STATE:
468 		data->line_state = setup->wValue;
469 		cdc_acm_update_linestate(data);
470 		usbd_msg_pub_device(uds_ctx, USBD_MSG_CDC_ACM_CONTROL_LINE_STATE, dev);
471 		return 0;
472 
473 	default:
474 		break;
475 	}
476 
477 	LOG_DBG("bmRequestType 0x%02x bRequest 0x%02x unsupported",
478 		setup->bmRequestType, setup->bRequest);
479 	errno = -ENOTSUP;
480 
481 	return 0;
482 }
483 
484 static int usbd_cdc_acm_init(struct usbd_class_data *const c_data)
485 {
486 	const struct device *dev = usbd_class_get_private(c_data);
487 	struct cdc_acm_uart_data *data = dev->data;
488 	struct usbd_cdc_acm_desc *desc = data->desc;
489 
490 	desc->if0_union.bControlInterface = desc->if0.bInterfaceNumber;
491 	desc->if0_union.bSubordinateInterface0 = desc->if1.bInterfaceNumber;
492 
493 	return 0;
494 }
495 
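/* Send a SERIAL_STATE notification on the interrupt IN endpoint and block
 * until the transfer completes.
 */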
496 static int cdc_acm_send_notification(const struct device *dev,
497 				     const uint16_t serial_state)
498 {
499 	struct cdc_acm_notification notification = {
500 		.bmRequestType = 0xA1,
501 		.bNotificationType = USB_CDC_SERIAL_STATE,
502 		.wValue = 0,
503 		.wIndex = 0,
504 		.wLength = sys_cpu_to_le16(sizeof(uint16_t)),
505 		.data = sys_cpu_to_le16(serial_state),
506 	};
507 	struct cdc_acm_uart_data *data = dev->data;
508 	struct usbd_class_data *c_data = data->c_data;
509 	struct net_buf *buf;
510 	uint8_t ep;
511 	int ret;
512 
513 	if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED)) {
514 		LOG_INF("USB configuration is not enabled");
515 		return -EACCES;
516 	}
517 
518 	if (atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
519 		LOG_INF("USB support is suspended (FIXME)");
520 		return -EACCES;
521 	}
522 
523 	ep = cdc_acm_get_int_in(c_data);
524 	buf = usbd_ep_buf_alloc(c_data, ep, sizeof(struct cdc_acm_notification));
525 	if (buf == NULL) {
526 		return -ENOMEM;
527 	}
528 
529 	net_buf_add_mem(buf, &notification, sizeof(struct cdc_acm_notification));
530 	ret = usbd_ep_enqueue(c_data, buf);
531 	/* FIXME: support for sync transfers */
532 	k_sem_take(&data->notif_sem, K_FOREVER);
533 
534 	return ret;
535 }
536 
537 /*
538  * TX handler is triggered when the state of the TX FIFO has been altered.
539  */
540 static void cdc_acm_tx_fifo_handler(struct k_work *work)
541 {
542 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
543 	struct cdc_acm_uart_data *data;
544 	struct usbd_class_data *c_data;
545 	struct net_buf *buf;
546 	size_t len;
547 	int ret;
548 
549 	data = CONTAINER_OF(dwork, struct cdc_acm_uart_data, tx_fifo_work);
550 	c_data = data->c_data;
551 
552 	if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED)) {
553 		LOG_DBG("USB configuration is not enabled");
554 		return;
555 	}
556 
557 	if (atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
558 		LOG_INF("USB support is suspended (FIXME: submit rwup)");
559 		return;
560 	}
561 
562 	if (atomic_test_and_set_bit(&data->state, CDC_ACM_TX_FIFO_BUSY)) {
563 		LOG_DBG("TX transfer already in progress");
564 		return;
565 	}
566 
567 	buf = cdc_acm_buf_alloc(cdc_acm_get_bulk_in(c_data));
568 	if (buf == NULL) {
569 		atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
570 		cdc_acm_work_schedule(&data->tx_fifo_work, K_MSEC(1));
571 		return;
572 	}
573 
574 	len = ring_buf_get(data->tx_fifo.rb, buf->data, buf->size);
575 	net_buf_add(buf, len);
576 
577 	ret = usbd_ep_enqueue(c_data, buf);
578 	if (ret) {
579 		LOG_ERR("Failed to enqueue");
580 		net_buf_unref(buf);
581 		atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
582 	}
583 }
584 
585 /*
586  * RX handler should be conditionally triggered at:
587  *  - (x) cdc_acm_irq_rx_enable()
588  *  - (x) RX transfer completion
589  *  - (x) the end of cdc_acm_irq_cb_handler
590  *  - (x) USBD class API enable call
591  *  - ( ) USBD class API resumed call (TODO)
592  */
593 static void cdc_acm_rx_fifo_handler(struct k_work *work)
594 {
595 	struct cdc_acm_uart_data *data;
596 	struct usbd_class_data *c_data;
597 	struct net_buf *buf;
598 	uint8_t ep;
599 	int ret;
600 
601 	data = CONTAINER_OF(work, struct cdc_acm_uart_data, rx_fifo_work);
602 	c_data = data->c_data;
603 
604 	if (!atomic_test_bit(&data->state, CDC_ACM_CLASS_ENABLED) ||
605 	    atomic_test_bit(&data->state, CDC_ACM_CLASS_SUSPENDED)) {
606 		LOG_INF("USB configuration is not enabled or device is suspended");
607 		return;
608 	}
609 
610 	if (ring_buf_space_get(data->rx_fifo.rb) < cdc_acm_get_bulk_mps(c_data)) {
611 		LOG_INF("RX buffer too small, throttle");
612 		return;
613 	}
614 
615 	if (atomic_test_and_set_bit(&data->state, CDC_ACM_RX_FIFO_BUSY)) {
616 		LOG_WRN("RX transfer already in progress");
617 		return;
618 	}
619 
620 	ep = cdc_acm_get_bulk_out(c_data);
621 	buf = cdc_acm_buf_alloc(ep);
622 	if (buf == NULL) {
623 		return;
624 	}
625 
626 	/* Shrink the buffer size if operating on a full speed bus */
627 	buf->size = MIN(cdc_acm_get_bulk_mps(c_data), buf->size);
628 
629 	ret = usbd_ep_enqueue(c_data, buf);
630 	if (ret) {
631 		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
632 		net_buf_unref(buf);
633 	}
634 }
635 
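/* UART interrupt-driven API: the irq_* calls below only update state bits
 * and submit work; the registered callback is invoked from the class work
 * queue by cdc_acm_irq_cb_handler().
 */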
636 static void cdc_acm_irq_tx_enable(const struct device *dev)
637 {
638 	struct cdc_acm_uart_data *const data = dev->data;
639 
640 	atomic_set_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED);
641 
642 	if (ring_buf_space_get(data->tx_fifo.rb)) {
643 		LOG_INF("tx_en: trigger irq_cb_work");
644 		cdc_acm_work_submit(&data->irq_cb_work);
645 	}
646 }
647 
648 static void cdc_acm_irq_tx_disable(const struct device *dev)
649 {
650 	struct cdc_acm_uart_data *const data = dev->data;
651 
652 	atomic_clear_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED);
653 }
654 
655 static void cdc_acm_irq_rx_enable(const struct device *dev)
656 {
657 	struct cdc_acm_uart_data *const data = dev->data;
658 
659 	atomic_set_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED);
660 
661 	/* Permit buffer to be drained regardless of USB state */
662 	if (!ring_buf_is_empty(data->rx_fifo.rb)) {
663 		LOG_INF("rx_en: trigger irq_cb_work");
664 		cdc_acm_work_submit(&data->irq_cb_work);
665 	}
666 
667 	if (!atomic_test_bit(&data->state, CDC_ACM_RX_FIFO_BUSY)) {
668 		LOG_INF("rx_en: trigger rx_fifo_work");
669 		cdc_acm_work_submit(&data->rx_fifo_work);
670 	}
671 }
672 
673 static void cdc_acm_irq_rx_disable(const struct device *dev)
674 {
675 	struct cdc_acm_uart_data *const data = dev->data;
676 
677 	atomic_clear_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED);
678 }
679 
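/* Fill the TX ring buffer; valid only from the IRQ callback context, i.e.
 * from the CDC ACM work queue thread.
 */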
680 static int cdc_acm_fifo_fill(const struct device *dev,
681 			     const uint8_t *const tx_data,
682 			     const int len)
683 {
684 	struct cdc_acm_uart_data *const data = dev->data;
685 	unsigned int lock;
686 	uint32_t done;
687 
688 	if (!check_wq_ctx(dev)) {
689 		LOG_WRN("Invoked by inappropriate context");
690 		__ASSERT_NO_MSG(false);
691 		return 0;
692 	}
693 
694 	lock = irq_lock();
695 	done = ring_buf_put(data->tx_fifo.rb, tx_data, len);
696 	irq_unlock(lock);
697 	if (done) {
698 		data->tx_fifo.altered = true;
699 	}
700 
701 	LOG_INF("UART dev %p, len %d, remaining space %u",
702 		dev, len, ring_buf_space_get(data->tx_fifo.rb));
703 
704 	return done;
705 }
706 
707 static int cdc_acm_fifo_read(const struct device *dev,
708 			     uint8_t *const rx_data,
709 			     const int size)
710 {
711 	struct cdc_acm_uart_data *const data = dev->data;
712 	uint32_t len;
713 
714 	LOG_INF("UART dev %p size %d length %u",
715 		dev, size, ring_buf_size_get(data->rx_fifo.rb));
716 
717 	if (!check_wq_ctx(dev)) {
718 		LOG_WRN("Invoked by inappropriate context");
719 		__ASSERT_NO_MSG(false);
720 		return 0;
721 	}
722 
723 	len = ring_buf_get(data->rx_fifo.rb, rx_data, size);
724 	if (len) {
725 		data->rx_fifo.altered = true;
726 	}
727 
728 	return len;
729 }
730 
731 static int cdc_acm_irq_tx_ready(const struct device *dev)
732 {
733 	struct cdc_acm_uart_data *const data = dev->data;
734 
735 	if (check_wq_ctx(dev)) {
736 		if (data->tx_fifo.irq) {
737 			return ring_buf_space_get(data->tx_fifo.rb);
738 		}
739 	} else {
740 		LOG_WRN("Invoked by inappropriate context");
741 		__ASSERT_NO_MSG(false);
742 	}
743 
744 	return 0;
745 }
746 
747 static int cdc_acm_irq_rx_ready(const struct device *dev)
748 {
749 	struct cdc_acm_uart_data *const data = dev->data;
750 
751 	if (check_wq_ctx(dev)) {
752 		if (data->rx_fifo.irq) {
753 			return 1;
754 		}
755 	} else {
756 		LOG_WRN("Invoked by inappropriate context");
757 		__ASSERT_NO_MSG(false);
758 	}
759 
760 
761 	return 0;
762 }
763 
764 static int cdc_acm_irq_is_pending(const struct device *dev)
765 {
766 	struct cdc_acm_uart_data *const data = dev->data;
767 
768 	if (check_wq_ctx(dev)) {
769 		if (data->tx_fifo.irq || data->rx_fifo.irq) {
770 			return 1;
771 		}
772 	} else {
773 		LOG_WRN("Invoked by inappropriate context");
774 		__ASSERT_NO_MSG(false);
775 	}
776 
777 	return 0;
778 }
779 
780 static int cdc_acm_irq_update(const struct device *dev)
781 {
782 	struct cdc_acm_uart_data *const data = dev->data;
783 
784 	if (!check_wq_ctx(dev)) {
785 		LOG_WRN("Invoked by inappropriate context");
786 		__ASSERT_NO_MSG(false);
787 		return 0;
788 	}
789 
790 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) &&
791 	    !ring_buf_is_empty(data->rx_fifo.rb)) {
792 		data->rx_fifo.irq = true;
793 	} else {
794 		data->rx_fifo.irq = false;
795 	}
796 
797 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED) &&
798 	    ring_buf_space_get(data->tx_fifo.rb)) {
799 		data->tx_fifo.irq = true;
800 	} else {
801 		data->tx_fifo.irq = false;
802 	}
803 
804 	return 1;
805 }
806 
807 /*
808  * IRQ handler should be conditionally triggered for the TX path at:
809  *  - cdc_acm_irq_tx_enable()
810  *  - TX transfer completion
811  *  - TX buffer is empty
812  *  - USBD class API enable and resumed calls
813  *
814  * for RX path, if enabled, at:
815  *  - cdc_acm_irq_rx_enable()
816  *  - RX transfer completion
817  *  - RX buffer is not empty
818  */
819 static void cdc_acm_irq_cb_handler(struct k_work *work)
820 {
821 	struct cdc_acm_uart_data *data;
822 	struct usbd_class_data *c_data;
823 
824 	data = CONTAINER_OF(work, struct cdc_acm_uart_data, irq_cb_work);
825 	c_data = data->c_data;
826 
827 	if (data->cb == NULL) {
828 		LOG_ERR("IRQ callback is not set");
829 		return;
830 	}
831 
832 	data->tx_fifo.altered = false;
833 	data->rx_fifo.altered = false;
834 	data->rx_fifo.irq = false;
835 	data->tx_fifo.irq = false;
836 
837 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) ||
838 	    atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED)) {
839 		data->cb(usbd_class_get_private(c_data), data->cb_data);
840 	}
841 
842 	if (data->rx_fifo.altered) {
843 		LOG_DBG("rx fifo altered, submit work");
844 		cdc_acm_work_submit(&data->rx_fifo_work);
845 	}
846 
847 	if (data->tx_fifo.altered) {
848 		LOG_DBG("tx fifo altered, submit work");
849 		if (!atomic_test_bit(&data->state, CDC_ACM_TX_FIFO_BUSY)) {
850 			cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
851 		}
852 	}
853 
854 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) &&
855 	    !ring_buf_is_empty(data->rx_fifo.rb)) {
856 		LOG_DBG("rx irq pending, submit irq_cb_work");
857 		cdc_acm_work_submit(&data->irq_cb_work);
858 	}
859 
860 	if (atomic_test_bit(&data->state, CDC_ACM_IRQ_TX_ENABLED) &&
861 	    ring_buf_space_get(data->tx_fifo.rb)) {
862 		LOG_DBG("tx irq pending, submit irq_cb_work");
863 		cdc_acm_work_submit(&data->irq_cb_work);
864 	}
865 }
866 
867 static void cdc_acm_irq_callback_set(const struct device *dev,
868 				     const uart_irq_callback_user_data_t cb,
869 				     void *const cb_data)
870 {
871 	struct cdc_acm_uart_data *const data = dev->data;
872 
873 	data->cb = cb;
874 	data->cb_data = cb_data;
875 }
876 
877 static int cdc_acm_poll_in(const struct device *dev, unsigned char *const c)
878 {
879 	struct cdc_acm_uart_data *const data = dev->data;
880 	uint32_t len;
881 	int ret = -1;
882 
883 	if (ring_buf_is_empty(data->rx_fifo.rb)) {
884 		return ret;
885 	}
886 
887 	len = ring_buf_get(data->rx_fifo.rb, c, 1);
888 	if (len) {
889 		cdc_acm_work_submit(&data->rx_fifo_work);
890 		ret = 0;
891 	}
892 
893 	return ret;
894 }
895 
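/* Put a character into the TX ring buffer; when flow control is enabled and
 * the caller is not in an ISR, block until space is available.
 */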
896 static void cdc_acm_poll_out(const struct device *dev, const unsigned char c)
897 {
898 	struct cdc_acm_uart_data *const data = dev->data;
899 	unsigned int lock;
900 	uint32_t wrote;
901 
902 	while (true) {
903 		lock = irq_lock();
904 		wrote = ring_buf_put(data->tx_fifo.rb, &c, 1);
905 		irq_unlock(lock);
906 
907 		if (wrote == 1) {
908 			break;
909 		}
910 
911 		if (k_is_in_isr() || !data->flow_ctrl) {
912 			LOG_WRN_ONCE("Ring buffer full, discard data");
913 			break;
914 		}
915 
916 		k_msleep(1);
917 	}
918 
919 	/* Schedule with minimal timeout to make it possible to send more than
920 	 * one byte per USB transfer. The latency increase is negligible while
921 	 * the increased throughput and reduced CPU usage are easily observable.
922 	 */
923 	cdc_acm_work_schedule(&data->tx_fifo_work, K_MSEC(1));
924 }
925 
926 #ifdef CONFIG_UART_LINE_CTRL
927 static int cdc_acm_line_ctrl_set(const struct device *dev,
928 				 const uint32_t ctrl, const uint32_t val)
929 {
930 	struct cdc_acm_uart_data *const data = dev->data;
931 	uint32_t flag = 0;
932 
933 	switch (ctrl) {
934 	case USB_CDC_LINE_CTRL_BAUD_RATE:
935 		/* Ignore since it cannot be used for notification anyway */
936 		return 0;
937 	case USB_CDC_LINE_CTRL_DCD:
938 		flag = USB_CDC_SERIAL_STATE_RXCARRIER;
939 		break;
940 	case USB_CDC_LINE_CTRL_DSR:
941 		flag = USB_CDC_SERIAL_STATE_TXCARRIER;
942 		break;
943 	case USB_CDC_LINE_CTRL_BREAK:
944 		flag = USB_CDC_SERIAL_STATE_BREAK;
945 		break;
946 	case USB_CDC_LINE_CTRL_RING_SIGNAL:
947 		flag = USB_CDC_SERIAL_STATE_RINGSIGNAL;
948 		break;
949 	case USB_CDC_LINE_CTRL_FRAMING:
950 		flag = USB_CDC_SERIAL_STATE_FRAMING;
951 		break;
952 	case USB_CDC_LINE_CTRL_PARITY:
953 		flag = USB_CDC_SERIAL_STATE_PARITY;
954 		break;
955 	case USB_CDC_LINE_CTRL_OVER_RUN:
956 		flag = USB_CDC_SERIAL_STATE_OVERRUN;
957 		break;
958 	default:
959 		return -EINVAL;
960 	}
961 
962 	if (val) {
963 		data->serial_state |= flag;
964 	} else {
965 		data->serial_state &= ~flag;
966 	}
967 
968 	return cdc_acm_send_notification(dev, data->serial_state);
969 }
970 
971 static int cdc_acm_line_ctrl_get(const struct device *dev,
972 				 const uint32_t ctrl, uint32_t *const val)
973 {
974 	struct cdc_acm_uart_data *const data = dev->data;
975 
976 	switch (ctrl) {
977 	case UART_LINE_CTRL_BAUD_RATE:
978 		*val = data->uart_cfg.baudrate;
979 		return 0;
980 	case UART_LINE_CTRL_RTS:
981 		*val = data->line_state_rts;
982 		return 0;
983 	case UART_LINE_CTRL_DTR:
984 		*val = data->line_state_dtr;
985 		return 0;
986 	}
987 
988 	return -ENOTSUP;
989 }
990 #endif
991 
992 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
993 static int cdc_acm_configure(const struct device *dev,
994 			     const struct uart_config *const cfg)
995 {
996 	struct cdc_acm_uart_data *const data = dev->data;
997 
998 	switch (cfg->flow_ctrl) {
999 	case UART_CFG_FLOW_CTRL_NONE:
1000 		data->flow_ctrl = false;
1001 		break;
1002 	case UART_CFG_FLOW_CTRL_RTS_CTS:
1003 		data->flow_ctrl = true;
1004 		break;
1005 	default:
1006 		return -ENOTSUP;
1007 	}
1008 
1009 	return 0;
1010 }
1011 
1012 static int cdc_acm_config_get(const struct device *dev,
1013 			      struct uart_config *const cfg)
1014 {
1015 	struct cdc_acm_uart_data *const data = dev->data;
1016 
1017 	memcpy(cfg, &data->uart_cfg, sizeof(struct uart_config));
1018 
1019 	return 0;
1020 }
1021 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
1022 
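/* Start the work queue shared by all CDC ACM instances. */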
1023 static int usbd_cdc_acm_init_wq(void)
1024 {
1025 	k_work_queue_init(&cdc_acm_work_q);
1026 	k_work_queue_start(&cdc_acm_work_q, cdc_acm_stack,
1027 			   K_KERNEL_STACK_SIZEOF(cdc_acm_stack),
1028 			   CONFIG_SYSTEM_WORKQUEUE_PRIORITY, NULL);
1029 	k_thread_name_set(&cdc_acm_work_q.thread, "cdc_acm_work_q");
1030 
1031 	return 0;
1032 }
1033 
1034 static int usbd_cdc_acm_preinit(const struct device *dev)
1035 {
1036 	struct cdc_acm_uart_data *const data = dev->data;
1037 
1038 	ring_buf_reset(data->tx_fifo.rb);
1039 	ring_buf_reset(data->rx_fifo.rb);
1040 
1041 	k_work_init_delayable(&data->tx_fifo_work, cdc_acm_tx_fifo_handler);
1042 	k_work_init(&data->rx_fifo_work, cdc_acm_rx_fifo_handler);
1043 	k_work_init(&data->irq_cb_work, cdc_acm_irq_cb_handler);
1044 
1045 	return 0;
1046 }
1047 
1048 static DEVICE_API(uart, cdc_acm_uart_api) = {
1049 	.irq_tx_enable = cdc_acm_irq_tx_enable,
1050 	.irq_tx_disable = cdc_acm_irq_tx_disable,
1051 	.irq_tx_ready = cdc_acm_irq_tx_ready,
1052 	.irq_rx_enable = cdc_acm_irq_rx_enable,
1053 	.irq_rx_disable = cdc_acm_irq_rx_disable,
1054 	.irq_rx_ready = cdc_acm_irq_rx_ready,
1055 	.irq_is_pending = cdc_acm_irq_is_pending,
1056 	.irq_update = cdc_acm_irq_update,
1057 	.irq_callback_set = cdc_acm_irq_callback_set,
1058 	.poll_in = cdc_acm_poll_in,
1059 	.poll_out = cdc_acm_poll_out,
1060 	.fifo_fill = cdc_acm_fifo_fill,
1061 	.fifo_read = cdc_acm_fifo_read,
1062 #ifdef CONFIG_UART_LINE_CTRL
1063 	.line_ctrl_set = cdc_acm_line_ctrl_set,
1064 	.line_ctrl_get = cdc_acm_line_ctrl_get,
1065 #endif
1066 #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
1067 	.configure = cdc_acm_configure,
1068 	.config_get = cdc_acm_config_get,
1069 #endif
1070 };
1071 
1072 struct usbd_class_api usbd_cdc_acm_api = {
1073 	.request = usbd_cdc_acm_request,
1074 	.update = usbd_cdc_acm_update,
1075 	.enable = usbd_cdc_acm_enable,
1076 	.disable = usbd_cdc_acm_disable,
1077 	.suspended = usbd_cdc_acm_suspended,
1078 	.resumed = usbd_cdc_acm_resumed,
1079 	.control_to_host = usbd_cdc_acm_cth,
1080 	.control_to_dev = usbd_cdc_acm_ctd,
1081 	.init = usbd_cdc_acm_init,
1082 	.get_desc = usbd_cdc_acm_get_desc,
1083 };
1084 
1085 #define CDC_ACM_DEFINE_DESCRIPTOR(n)						\
1086 static struct usbd_cdc_acm_desc cdc_acm_desc_##n = {				\
1087 	.iad = {								\
1088 		.bLength = sizeof(struct usb_association_descriptor),		\
1089 		.bDescriptorType = USB_DESC_INTERFACE_ASSOC,			\
1090 		.bFirstInterface = 0,						\
1091 		.bInterfaceCount = 0x02,					\
1092 		.bFunctionClass = USB_BCC_CDC_CONTROL,				\
1093 		.bFunctionSubClass = ACM_SUBCLASS,				\
1094 		.bFunctionProtocol = 0,						\
1095 		.iFunction = 0,							\
1096 	},									\
1097 										\
1098 	.if0 = {								\
1099 		.bLength = sizeof(struct usb_if_descriptor),			\
1100 		.bDescriptorType = USB_DESC_INTERFACE,				\
1101 		.bInterfaceNumber = 0,						\
1102 		.bAlternateSetting = 0,						\
1103 		.bNumEndpoints = 1,						\
1104 		.bInterfaceClass = USB_BCC_CDC_CONTROL,				\
1105 		.bInterfaceSubClass = ACM_SUBCLASS,				\
1106 		.bInterfaceProtocol = 0,					\
1107 		.iInterface = 0,						\
1108 	},									\
1109 										\
1110 	.if0_header = {								\
1111 		.bFunctionLength = sizeof(struct cdc_header_descriptor),	\
1112 		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
1113 		.bDescriptorSubtype = HEADER_FUNC_DESC,				\
1114 		.bcdCDC = sys_cpu_to_le16(USB_SRN_1_1),				\
1115 	},									\
1116 										\
1117 	.if0_cm = {								\
1118 		.bFunctionLength = sizeof(struct cdc_cm_descriptor),		\
1119 		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
1120 		.bDescriptorSubtype = CALL_MANAGEMENT_FUNC_DESC,		\
1121 		.bmCapabilities = 0,						\
1122 		.bDataInterface = 1,						\
1123 	},									\
1124 										\
1125 	.if0_acm = {								\
1126 		.bFunctionLength = sizeof(struct cdc_acm_descriptor),		\
1127 		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
1128 		.bDescriptorSubtype = ACM_FUNC_DESC,				\
1129 		/* See CDC PSTN Subclass Chapter 5.3.2 */			\
1130 		.bmCapabilities = BIT(1),					\
1131 	},									\
1132 										\
1133 	.if0_union = {								\
1134 		.bFunctionLength = sizeof(struct cdc_union_descriptor),		\
1135 		.bDescriptorType = USB_DESC_CS_INTERFACE,			\
1136 		.bDescriptorSubtype = UNION_FUNC_DESC,				\
1137 		.bControlInterface = 0,						\
1138 		.bSubordinateInterface0 = 1,					\
1139 	},									\
1140 										\
1141 	.if0_int_ep = {								\
1142 		.bLength = sizeof(struct usb_ep_descriptor),			\
1143 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1144 		.bEndpointAddress = 0x81,					\
1145 		.bmAttributes = USB_EP_TYPE_INTERRUPT,				\
1146 		.wMaxPacketSize = sys_cpu_to_le16(CDC_ACM_DEFAULT_INT_EP_MPS),	\
1147 		.bInterval = CDC_ACM_FS_INT_EP_INTERVAL,			\
1148 	},									\
1149 										\
1150 	.if0_hs_int_ep = {							\
1151 		.bLength = sizeof(struct usb_ep_descriptor),			\
1152 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1153 		.bEndpointAddress = 0x81,					\
1154 		.bmAttributes = USB_EP_TYPE_INTERRUPT,				\
1155 		.wMaxPacketSize = sys_cpu_to_le16(CDC_ACM_DEFAULT_INT_EP_MPS),	\
1156 		.bInterval = CDC_ACM_HS_INT_EP_INTERVAL,			\
1157 	},									\
1158 										\
1159 	.if1 = {								\
1160 		.bLength = sizeof(struct usb_if_descriptor),			\
1161 		.bDescriptorType = USB_DESC_INTERFACE,				\
1162 		.bInterfaceNumber = 1,						\
1163 		.bAlternateSetting = 0,						\
1164 		.bNumEndpoints = 2,						\
1165 		.bInterfaceClass = USB_BCC_CDC_DATA,				\
1166 		.bInterfaceSubClass = 0,					\
1167 		.bInterfaceProtocol = 0,					\
1168 		.iInterface = 0,						\
1169 	},									\
1170 										\
1171 	.if1_in_ep = {								\
1172 		.bLength = sizeof(struct usb_ep_descriptor),			\
1173 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1174 		.bEndpointAddress = 0x82,					\
1175 		.bmAttributes = USB_EP_TYPE_BULK,				\
1176 		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
1177 		.bInterval = 0,							\
1178 	},									\
1179 										\
1180 	.if1_out_ep = {								\
1181 		.bLength = sizeof(struct usb_ep_descriptor),			\
1182 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1183 		.bEndpointAddress = 0x01,					\
1184 		.bmAttributes = USB_EP_TYPE_BULK,				\
1185 		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
1186 		.bInterval = 0,							\
1187 	},									\
1188 										\
1189 	.if1_hs_in_ep = {							\
1190 		.bLength = sizeof(struct usb_ep_descriptor),			\
1191 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1192 		.bEndpointAddress = 0x82,					\
1193 		.bmAttributes = USB_EP_TYPE_BULK,				\
1194 		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
1195 		.bInterval = 0,							\
1196 	},									\
1197 										\
1198 	.if1_hs_out_ep = {							\
1199 		.bLength = sizeof(struct usb_ep_descriptor),			\
1200 		.bDescriptorType = USB_DESC_ENDPOINT,				\
1201 		.bEndpointAddress = 0x01,					\
1202 		.bmAttributes = USB_EP_TYPE_BULK,				\
1203 		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
1204 		.bInterval = 0,							\
1205 	},									\
1206 										\
1207 	.nil_desc = {								\
1208 		.bLength = 0,							\
1209 		.bDescriptorType = 0,						\
1210 	},									\
1211 };										\
1212 										\
1213 const static struct usb_desc_header *cdc_acm_fs_desc_##n[] = {			\
1214 	(struct usb_desc_header *) &cdc_acm_desc_##n.iad,			\
1215 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0,			\
1216 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_header,		\
1217 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_cm,			\
1218 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_acm,			\
1219 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_union,			\
1220 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_int_ep,		\
1221 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1,			\
1222 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1_in_ep,			\
1223 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1_out_ep,		\
1224 	(struct usb_desc_header *) &cdc_acm_desc_##n.nil_desc,			\
1225 };										\
1226 										\
1227 const static struct usb_desc_header *cdc_acm_hs_desc_##n[] = {			\
1228 	(struct usb_desc_header *) &cdc_acm_desc_##n.iad,			\
1229 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0,			\
1230 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_header,		\
1231 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_cm,			\
1232 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_acm,			\
1233 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_union,			\
1234 	(struct usb_desc_header *) &cdc_acm_desc_##n.if0_hs_int_ep,		\
1235 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1,			\
1236 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1_hs_in_ep,		\
1237 	(struct usb_desc_header *) &cdc_acm_desc_##n.if1_hs_out_ep,		\
1238 	(struct usb_desc_header *) &cdc_acm_desc_##n.nil_desc,			\
1239 }
1240 
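/* Instances are defined from devicetree. A minimal overlay sketch (the
 * controller label and node names below are only examples, as used in the
 * upstream samples, and may differ per board):
 *
 *	&zephyr_udc0 {
 *		cdc_acm_uart0: cdc_acm_uart0 {
 *			compatible = "zephyr,cdc-acm-uart";
 *		};
 *	};
 */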
1241 #define USBD_CDC_ACM_DT_DEVICE_DEFINE(n)					\
1242 	BUILD_ASSERT(DT_INST_ON_BUS(n, usb),					\
1243 		     "node " DT_NODE_PATH(DT_DRV_INST(n))			\
1244 		     " is not assigned to a USB device controller");		\
1245 										\
1246 	CDC_ACM_DEFINE_DESCRIPTOR(n);						\
1247 										\
1248 	USBD_DEFINE_CLASS(cdc_acm_##n,						\
1249 			  &usbd_cdc_acm_api,					\
1250 			  (void *)DEVICE_DT_GET(DT_DRV_INST(n)), NULL);		\
1251 										\
1252 	RING_BUF_DECLARE(cdc_acm_rb_rx_##n, DT_INST_PROP(n, rx_fifo_size));	\
1253 	RING_BUF_DECLARE(cdc_acm_rb_tx_##n, DT_INST_PROP(n, tx_fifo_size));	\
1254 										\
1255 	static struct cdc_acm_uart_data uart_data_##n = {			\
1256 		.line_coding = CDC_ACM_DEFAULT_LINECODING,			\
1257 		.c_data = &cdc_acm_##n,						\
1258 		.rx_fifo.rb = &cdc_acm_rb_rx_##n,				\
1259 		.tx_fifo.rb = &cdc_acm_rb_tx_##n,				\
1260 		.flow_ctrl = DT_INST_PROP(n, hw_flow_control),			\
1261 		.notif_sem = Z_SEM_INITIALIZER(uart_data_##n.notif_sem, 0, 1),	\
1262 		.desc = &cdc_acm_desc_##n,					\
1263 		.fs_desc = cdc_acm_fs_desc_##n,					\
1264 		.hs_desc = cdc_acm_hs_desc_##n,					\
1265 	};									\
1266 										\
1267 	DEVICE_DT_INST_DEFINE(n, usbd_cdc_acm_preinit, NULL,			\
1268 		&uart_data_##n, NULL,						\
1269 		PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,			\
1270 		&cdc_acm_uart_api);
1271 
1272 DT_INST_FOREACH_STATUS_OKAY(USBD_CDC_ACM_DT_DEVICE_DEFINE);
1273 
1274 SYS_INIT(usbd_cdc_acm_init_wq, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
1275