1 /*
2  * Copyright (c) 2021-2022 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/init.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/net/buf.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/sys/__assert.h>
12 #include <zephyr/usb/usb_ch9.h>
13 #include "udc_common.h"
14 
15 #include <zephyr/logging/log.h>
#if defined(CONFIG_UDC_DRIVER_LOG_LEVEL)
#define UDC_COMMON_LOG_LEVEL CONFIG_UDC_DRIVER_LOG_LEVEL
#else
#define UDC_COMMON_LOG_LEVEL LOG_LEVEL_NONE
#endif
/* Register with the fallback-capable level: CONFIG_UDC_DRIVER_LOG_LEVEL may
 * be undefined when the UDC log Kconfig option is disabled, in which case
 * the guard above substitutes LOG_LEVEL_NONE.
 */
LOG_MODULE_REGISTER(udc, UDC_COMMON_LOG_LEVEL);
22 
23 static inline void udc_buf_destroy(struct net_buf *buf);
24 
25 NET_BUF_POOL_VAR_DEFINE(udc_ep_pool,
26 			CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE,
27 			sizeof(struct udc_buf_info), udc_buf_destroy);
28 
29 #define USB_EP_LUT_IDX(ep) (USB_EP_DIR_IS_IN(ep) ? (ep & BIT_MASK(4)) + 16 : \
30 						   ep & BIT_MASK(4))
31 
udc_set_suspended(const struct device * dev,const bool value)32 void udc_set_suspended(const struct device *dev, const bool value)
33 {
34 	struct udc_data *data = dev->data;
35 
36 	if (value == udc_is_suspended(dev)) {
37 		LOG_WRN("Spurious suspend/resume event");
38 	}
39 
40 	atomic_set_bit_to(&data->status, UDC_STATUS_SUSPENDED, value);
41 }
42 
udc_get_ep_cfg(const struct device * dev,const uint8_t ep)43 struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep)
44 {
45 	struct udc_data *data = dev->data;
46 
47 	return data->ep_lut[USB_EP_LUT_IDX(ep)];
48 }
49 
udc_ep_is_busy(const struct device * dev,const uint8_t ep)50 bool udc_ep_is_busy(const struct device *dev, const uint8_t ep)
51 {
52 	struct udc_ep_config *ep_cfg;
53 
54 	ep_cfg = udc_get_ep_cfg(dev, ep);
55 	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);
56 
57 	return ep_cfg->stat.busy;
58 }
59 
udc_ep_set_busy(const struct device * dev,const uint8_t ep,const bool busy)60 void udc_ep_set_busy(const struct device *dev, const uint8_t ep, const bool busy)
61 {
62 	struct udc_ep_config *ep_cfg;
63 
64 	ep_cfg = udc_get_ep_cfg(dev, ep);
65 	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);
66 	ep_cfg->stat.busy = busy;
67 }
68 
udc_register_ep(const struct device * dev,struct udc_ep_config * const cfg)69 int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg)
70 {
71 	struct udc_data *data = dev->data;
72 	uint8_t idx;
73 
74 	if (udc_is_initialized(dev)) {
75 		return -EACCES;
76 	}
77 
78 	idx = USB_EP_LUT_IDX(cfg->addr);
79 	__ASSERT_NO_MSG(idx < ARRAY_SIZE(data->ep_lut));
80 
81 	data->ep_lut[idx] = cfg;
82 	k_fifo_init(&cfg->fifo);
83 
84 	return 0;
85 }
86 
udc_buf_get(const struct device * dev,const uint8_t ep)87 struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep)
88 {
89 	struct udc_ep_config *ep_cfg;
90 
91 	ep_cfg = udc_get_ep_cfg(dev, ep);
92 	if (ep_cfg == NULL) {
93 		return NULL;
94 	}
95 
96 	return net_buf_get(&ep_cfg->fifo, K_NO_WAIT);
97 }
98 
/* Drain the endpoint's transfer queue completely, returning the first
 * buffer with all remaining queued buffers chained behind it via the
 * net_buf frags list. Returns NULL if the endpoint is unknown or its
 * queue is empty.
 */
struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;
	struct net_buf *buf;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	buf = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	LOG_DBG("ep 0x%02x dequeue %p", ep, buf);
	/* Link every remaining FIFO entry onto the chain. Each dequeued
	 * buffer becomes the frags pointer of the previous one; the loop
	 * stops when the FIFO is empty (condition) or a get unexpectedly
	 * returns NULL (defensive break).
	 */
	for (struct net_buf *n = buf; !k_fifo_is_empty(&ep_cfg->fifo); n = n->frags) {
		n->frags = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
		LOG_DBG("|-> %p ", n->frags);
		if (n->frags == NULL) {
			break;
		}
	}

	return buf;
}
125 
udc_buf_peek(const struct device * dev,const uint8_t ep)126 struct net_buf *udc_buf_peek(const struct device *dev, const uint8_t ep)
127 {
128 	struct udc_ep_config *ep_cfg;
129 
130 	ep_cfg = udc_get_ep_cfg(dev, ep);
131 	if (ep_cfg == NULL) {
132 		return NULL;
133 	}
134 
135 	return k_fifo_peek_head(&ep_cfg->fifo);
136 }
137 
udc_buf_put(struct udc_ep_config * const ep_cfg,struct net_buf * const buf)138 void udc_buf_put(struct udc_ep_config *const ep_cfg,
139 		 struct net_buf *const buf)
140 {
141 	net_buf_put(&ep_cfg->fifo, buf);
142 }
143 
udc_ep_buf_set_setup(struct net_buf * const buf)144 void udc_ep_buf_set_setup(struct net_buf *const buf)
145 {
146 	struct udc_buf_info *bi = udc_get_buf_info(buf);
147 
148 	bi->setup = 1;
149 	bi->data = 0;
150 	bi->status = 0;
151 }
152 
udc_ep_buf_has_zlp(const struct net_buf * const buf)153 bool udc_ep_buf_has_zlp(const struct net_buf *const buf)
154 {
155 	const struct udc_buf_info *bi = udc_get_buf_info(buf);
156 
157 	return bi->zlp;
158 }
159 
udc_ep_buf_clear_zlp(const struct net_buf * const buf)160 void udc_ep_buf_clear_zlp(const struct net_buf *const buf)
161 {
162 	struct udc_buf_info *bi = udc_get_buf_info(buf);
163 
164 	bi->zlp = false;
165 }
166 
udc_submit_event(const struct device * dev,const enum udc_event_type type,const int status)167 int udc_submit_event(const struct device *dev,
168 		     const enum udc_event_type type,
169 		     const int status)
170 {
171 	struct udc_data *data = dev->data;
172 	struct udc_event drv_evt = {
173 		.type = type,
174 		.status = status,
175 		.dev = dev,
176 	};
177 
178 	if (!udc_is_initialized(dev)) {
179 		return -EPERM;
180 	}
181 
182 	return data->event_cb(dev, &drv_evt);
183 }
184 
udc_submit_ep_event(const struct device * dev,struct net_buf * const buf,const int err)185 int udc_submit_ep_event(const struct device *dev,
186 			struct net_buf *const buf,
187 			const int err)
188 {
189 	struct udc_buf_info *bi = udc_get_buf_info(buf);
190 	struct udc_data *data = dev->data;
191 	const struct udc_event drv_evt = {
192 		.type = UDC_EVT_EP_REQUEST,
193 		.buf = buf,
194 		.dev = dev,
195 	};
196 
197 	if (!udc_is_initialized(dev)) {
198 		return -EPERM;
199 	}
200 
201 	bi->err = err;
202 
203 	return data->event_cb(dev, &drv_evt);
204 }
205 
ep_attrib_get_transfer(uint8_t attributes)206 static uint8_t ep_attrib_get_transfer(uint8_t attributes)
207 {
208 	return attributes & USB_EP_TRANSFER_TYPE_MASK;
209 }
210 
/* Validate a requested endpoint configuration against the capabilities the
 * driver registered for this endpoint: direction, maximum packet size, and
 * transfer type. Returns true when the configuration can be supported.
 *
 * NOTE(review): dev and interval are currently unused here; presumably kept
 * in the signature for future interval validation -- confirm.
 */
static bool ep_check_config(const struct device *dev,
			    const struct udc_ep_config *const cfg,
			    const uint8_t ep,
			    const uint8_t attributes,
			    const uint16_t mps,
			    const uint8_t interval)
{
	bool dir_is_in = USB_EP_DIR_IS_IN(ep);
	bool dir_is_out = USB_EP_DIR_IS_OUT(ep);

	LOG_DBG("cfg d:%c|%c t:%c|%c|%c|%c, mps %u",
		cfg->caps.in ? 'I' : '-',
		cfg->caps.out ? 'O' : '-',
		cfg->caps.iso ? 'S' : '-',
		cfg->caps.bulk ? 'B' : '-',
		cfg->caps.interrupt ? 'I' : '-',
		cfg->caps.control ? 'C' : '-',
		cfg->caps.mps);

	/* Requested direction must match the endpoint's capability. */
	if (dir_is_out && !cfg->caps.out) {
		return false;
	}

	if (dir_is_in && !cfg->caps.in) {
		return false;
	}

	/* Requested packet size must not exceed the hardware maximum. */
	if (mps > cfg->caps.mps) {
		return false;
	}

	/* Requested transfer type must be one the endpoint supports. */
	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		if (!cfg->caps.bulk) {
			return false;
		}
		break;
	case USB_EP_TYPE_INTERRUPT:
		if (!cfg->caps.interrupt) {
			return false;
		}
		break;
	case USB_EP_TYPE_ISO:
		if (!cfg->caps.iso) {
			return false;
		}
		break;
	case USB_EP_TYPE_CONTROL:
		if (!cfg->caps.control) {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
269 
/* Fill in *mps when the caller requested the "maximum possible" packet size
 * (mps == 0). The value is clamped to both the endpoint's capability and the
 * USB-specification maximum for the transfer type (bulk: 512 HS / 64 FS,
 * interrupt: 1024 HS / 64 FS). Control and isochronous requests are left
 * untouched.
 */
static void ep_update_mps(const struct device *dev,
			  const struct udc_ep_config *const cfg,
			  const uint8_t attributes,
			  uint16_t *const mps)
{
	struct udc_device_caps caps = udc_caps(dev);
	const uint16_t spec_int_mps = caps.hs ? 1024 : 64;
	const uint16_t spec_bulk_mps = caps.hs ? 512 : 64;

	/*
	 * TODO: It does not take into account the actual speed of the
	 * bus after the RESET. Should be fixed/improved when the driver
	 * for high speed controller are ported.
	 */
	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		*mps = MIN(cfg->caps.mps, spec_bulk_mps);
		break;
	case USB_EP_TYPE_INTERRUPT:
		*mps = MIN(cfg->caps.mps, spec_int_mps);
		break;
	case USB_EP_TYPE_CONTROL:
		__fallthrough;
	case USB_EP_TYPE_ISO:
		__fallthrough;
	default:
		/* Control/ISO: leave *mps as provided by the caller. */
		return;
	}
}
299 
udc_ep_try_config(const struct device * dev,const uint8_t ep,const uint8_t attributes,uint16_t * const mps,const uint8_t interval)300 int udc_ep_try_config(const struct device *dev,
301 		      const uint8_t ep,
302 		      const uint8_t attributes,
303 		      uint16_t *const mps,
304 		      const uint8_t interval)
305 {
306 	const struct udc_api *api = dev->api;
307 	struct udc_ep_config *cfg;
308 	bool ret;
309 
310 	cfg = udc_get_ep_cfg(dev, ep);
311 	if (cfg == NULL) {
312 		return -ENODEV;
313 	}
314 
315 	api->lock(dev);
316 
317 	ret = ep_check_config(dev, cfg, ep, attributes, *mps, interval);
318 	if (ret == true && *mps == 0U) {
319 		ep_update_mps(dev, cfg, attributes, mps);
320 	}
321 
322 	api->unlock(dev);
323 
324 	return (ret == false) ? -ENOTSUP : 0;
325 }
326 
/* Validate and enable an endpoint on behalf of the driver. Unlike
 * udc_ep_enable(), this helper performs no locking and no controller-state
 * check, so it may also be used for the control endpoints during
 * initialization. Returns 0 on success, -ENODEV for unknown/invalid
 * configuration, -EALREADY if the endpoint is already enabled, or the
 * driver's ep_enable() error code.
 */
int udc_ep_enable_internal(const struct device *dev,
			   const uint8_t ep,
			   const uint8_t attributes,
			   const uint16_t mps,
			   const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already enabled", cfg->addr);
		return -EALREADY;
	}

	if (!ep_check_config(dev, cfg, ep, attributes, mps, interval)) {
		LOG_ERR("Endpoint 0x%02x validation failed", cfg->addr);
		return -ENODEV;
	}

	/* Store the active configuration before calling into the driver,
	 * which may read it from cfg.
	 */
	cfg->attributes = attributes;
	cfg->mps = mps;
	cfg->interval = interval;

	/* Reset per-endpoint runtime state (buffer parity, halt, DATA1). */
	cfg->stat.odd = 0;
	cfg->stat.halted = 0;
	cfg->stat.data1 = false;
	ret = api->ep_enable(dev, cfg);
	/* Only mark enabled if the driver accepted the endpoint. */
	cfg->stat.enabled = ret ? false : true;

	return ret;
}
364 
udc_ep_enable(const struct device * dev,const uint8_t ep,const uint8_t attributes,const uint16_t mps,const uint8_t interval)365 int udc_ep_enable(const struct device *dev,
366 		  const uint8_t ep,
367 		  const uint8_t attributes,
368 		  const uint16_t mps,
369 		  const uint8_t interval)
370 {
371 	const struct udc_api *api = dev->api;
372 	int ret;
373 
374 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
375 		return -EINVAL;
376 	}
377 
378 	api->lock(dev);
379 
380 	if (!udc_is_enabled(dev)) {
381 		ret = -EPERM;
382 		goto ep_enable_error;
383 	}
384 
385 	ret = udc_ep_enable_internal(dev, ep, attributes, mps, interval);
386 
387 ep_enable_error:
388 	api->unlock(dev);
389 
390 	return ret;
391 }
392 
udc_ep_disable_internal(const struct device * dev,const uint8_t ep)393 int udc_ep_disable_internal(const struct device *dev, const uint8_t ep)
394 {
395 	const struct udc_api *api = dev->api;
396 	struct udc_ep_config *cfg;
397 	int ret;
398 
399 	cfg = udc_get_ep_cfg(dev, ep);
400 	if (cfg == NULL) {
401 		return -ENODEV;
402 	}
403 
404 	if (!cfg->stat.enabled) {
405 		LOG_ERR("ep 0x%02x already disabled", cfg->addr);
406 		return -EALREADY;
407 	}
408 
409 	ret = api->ep_disable(dev, cfg);
410 	cfg->stat.enabled = ret ? cfg->stat.enabled : false;
411 
412 	return ret;
413 }
414 
udc_ep_disable(const struct device * dev,const uint8_t ep)415 int udc_ep_disable(const struct device *dev, const uint8_t ep)
416 {
417 	const struct udc_api *api = dev->api;
418 	int ret;
419 
420 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
421 		return -EINVAL;
422 	}
423 
424 	api->lock(dev);
425 
426 	if (!udc_is_initialized(dev)) {
427 		ret = -EPERM;
428 		goto ep_disable_error;
429 	}
430 
431 	ret = udc_ep_disable_internal(dev, ep);
432 
433 ep_disable_error:
434 	api->unlock(dev);
435 
436 	return ret;
437 }
438 
/* Halt (stall) an endpoint. Fails with -EPERM when the controller is not
 * enabled, -ENODEV when the endpoint is unknown or disabled, and -ENOTSUP
 * for isochronous endpoints, which have no halt feature.
 *
 * NOTE(review): stat.halted is not set here on success -- presumably the
 * driver's ep_set_halt() updates it; confirm against the drivers.
 */
int udc_ep_set_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_set_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	/* Isochronous endpoints do not support the halt feature. */
	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_set_halt_error;
	}

	ret = api->ep_set_halt(dev, cfg);

ep_set_halt_error:
	api->unlock(dev);

	return ret;
}
475 
/* Clear the halt (stall) condition of an endpoint. Error codes mirror
 * udc_ep_set_halt(); on success the cached halted flag is cleared.
 */
int udc_ep_clear_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_clear_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	/* Isochronous endpoints do not support the halt feature. */
	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_clear_halt_error;
	}

	ret = api->ep_clear_halt(dev, cfg);
	if (ret == 0) {
		cfg->stat.halted = false;
	}

ep_clear_halt_error:
	api->unlock(dev);

	return ret;
}
515 
udc_debug_ep_enqueue(const struct device * dev,struct udc_ep_config * const cfg)516 static void udc_debug_ep_enqueue(const struct device *dev,
517 				 struct udc_ep_config *const cfg)
518 {
519 	struct udc_buf_info *bi;
520 	struct net_buf *buf;
521 	sys_slist_t list;
522 
523 	list.head = k_fifo_peek_head(&cfg->fifo);
524 	list.tail = k_fifo_peek_tail(&cfg->fifo);
525 	if (list.head == NULL) {
526 		LOG_DBG("ep 0x%02x queue is empty", cfg->addr);
527 		return;
528 	}
529 
530 	LOG_DBG("[de]queue ep 0x%02x:", cfg->addr);
531 
532 	SYS_SLIST_FOR_EACH_CONTAINER(&list, buf, node) {
533 		bi = udc_get_buf_info(buf);
534 		LOG_DBG("|-> %p (%u) ->", buf, buf->size);
535 	}
536 }
537 
/* Queue a transfer buffer on its endpoint (taken from the buffer metadata)
 * and hand it to the driver. Fails with -EPERM when the controller is not
 * enabled or the target is the default OUT control endpoint (managed by
 * the stack), and -ENODEV for unknown endpoints.
 */
int udc_ep_enqueue(const struct device *dev, struct net_buf *const buf)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	struct udc_buf_info *bi;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	bi = udc_get_buf_info(buf);
	/* Control OUT transfers are driven by the stack, not by callers. */
	if (bi->ep == USB_CONTROL_EP_OUT) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	cfg = udc_get_ep_cfg(dev, bi->ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	/* IN: log the payload length; OUT: log the available space. */
	LOG_DBG("Queue ep 0x%02x %p len %u", cfg->addr, buf,
		USB_EP_DIR_IS_IN(cfg->addr) ? buf->len : buf->size);

	/* A buffer enqueued through this API can never be a setup packet. */
	bi->setup = 0;
	ret = api->ep_enqueue(dev, cfg, buf);

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}
575 
/* Ask the driver to cancel and drain all pending transfers of an endpoint.
 * Succeeds trivially when the queue is already empty. The endpoint is
 * expected to be disabled or halted first; a warning is logged otherwise.
 */
int udc_ep_dequeue(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_dequeue_error;
	}

	if (cfg->stat.enabled || cfg->stat.halted) {
		LOG_INF("ep 0x%02x is not halted|disabled", cfg->addr);
	}

	/* Dump the queue contents when debug logging is compiled in. */
	if (UDC_COMMON_LOG_LEVEL == LOG_LEVEL_DBG) {
		udc_debug_ep_enqueue(dev, cfg);
	}

	if (k_fifo_is_empty(&cfg->fifo)) {
		ret = 0;
	} else  {
		ret = api->ep_dequeue(dev, cfg);
	}

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}
614 
udc_ep_buf_alloc(const struct device * dev,const uint8_t ep,const size_t size)615 struct net_buf *udc_ep_buf_alloc(const struct device *dev,
616 				 const uint8_t ep,
617 				 const size_t size)
618 {
619 	const struct udc_api *api = dev->api;
620 	struct net_buf *buf = NULL;
621 	struct udc_buf_info *bi;
622 
623 	api->lock(dev);
624 
625 	buf = net_buf_alloc_len(&udc_ep_pool, size, K_NO_WAIT);
626 	if (!buf) {
627 		LOG_ERR("Failed to allocate net_buf %zd", size);
628 		goto ep_alloc_error;
629 	}
630 
631 	bi = udc_get_buf_info(buf);
632 	memset(bi, 0, sizeof(struct udc_buf_info));
633 	bi->ep = ep;
634 	LOG_DBG("Allocate net_buf, ep 0x%02x, size %zd", ep, size);
635 
636 ep_alloc_error:
637 	api->unlock(dev);
638 
639 	return buf;
640 }
641 
struct net_buf *udc_ctrl_alloc(const struct device *dev,
			       const uint8_t ep,
			       const size_t size)
{
	/* Control transfers currently share the generic endpoint buffer
	 * pool; a dedicated control allocator may replace this later.
	 */
	return udc_ep_buf_alloc(dev, ep, size);
}
649 
/* Pool destroy callback invoked when a buffer's refcount drops to zero;
 * returns the buffer to udc_ep_pool.
 */
static inline void udc_buf_destroy(struct net_buf *buf)
{
	/* Adjust level and use together with the log in udc_ep_buf_alloc() */
	LOG_DBG("destroy %p", buf);
	net_buf_destroy(buf);
}
656 
udc_ep_buf_free(const struct device * dev,struct net_buf * const buf)657 int udc_ep_buf_free(const struct device *dev, struct net_buf *const buf)
658 {
659 	const struct udc_api *api = dev->api;
660 	int ret = 0;
661 
662 	api->lock(dev);
663 	net_buf_unref(buf);
664 	api->unlock(dev);
665 
666 	return ret;
667 }
668 
udc_device_speed(const struct device * dev)669 enum udc_bus_speed udc_device_speed(const struct device *dev)
670 {
671 	const struct udc_api *api = dev->api;
672 	enum udc_bus_speed speed = UDC_BUS_UNKNOWN;
673 
674 	api->lock(dev);
675 
676 	if (!udc_is_enabled(dev)) {
677 		goto device_speed_error;
678 	}
679 
680 	if (api->device_speed) {
681 		speed = api->device_speed(dev);
682 	} else {
683 		/* TODO: Shall we track connected status in UDC? */
684 		speed = UDC_BUS_SPEED_FS;
685 	}
686 
687 device_speed_error:
688 	api->unlock(dev);
689 
690 	return speed;
691 }
692 
udc_enable(const struct device * dev)693 int udc_enable(const struct device *dev)
694 {
695 	const struct udc_api *api = dev->api;
696 	struct udc_data *data = dev->data;
697 	int ret;
698 
699 	api->lock(dev);
700 
701 	if (!udc_is_initialized(dev)) {
702 		ret = -EPERM;
703 		goto udc_enable_error;
704 	}
705 
706 	if (udc_is_enabled(dev)) {
707 		ret = -EALREADY;
708 		goto udc_enable_error;
709 	}
710 
711 	data->stage = CTRL_PIPE_STAGE_SETUP;
712 
713 	ret = api->enable(dev);
714 	if (ret == 0) {
715 		atomic_set_bit(&data->status, UDC_STATUS_ENABLED);
716 	}
717 
718 udc_enable_error:
719 	api->unlock(dev);
720 
721 	return ret;
722 }
723 
udc_disable(const struct device * dev)724 int udc_disable(const struct device *dev)
725 {
726 	const struct udc_api *api = dev->api;
727 	struct udc_data *data = dev->data;
728 	int ret;
729 
730 	api->lock(dev);
731 
732 	if (!udc_is_enabled(dev)) {
733 		ret = -EALREADY;
734 		goto udc_disable_error;
735 	}
736 
737 	ret = api->disable(dev);
738 	atomic_clear_bit(&data->status, UDC_STATUS_ENABLED);
739 
740 udc_disable_error:
741 	api->unlock(dev);
742 
743 	return ret;
744 }
745 
int udc_init(const struct device *dev, udc_event_cb_t event_cb)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret = -EALREADY;

	/* The event callback is mandatory; everything the stack learns
	 * from the controller flows through it.
	 */
	if (event_cb == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		data->event_cb = event_cb;

		ret = api->init(dev);
		if (ret == 0) {
			atomic_set_bit(&data->status, UDC_STATUS_INITIALIZED);
		}
	}

	api->unlock(dev);

	return ret;
}
775 
udc_shutdown(const struct device * dev)776 int udc_shutdown(const struct device *dev)
777 {
778 	const struct udc_api *api = dev->api;
779 	struct udc_data *data = dev->data;
780 	int ret;
781 
782 	api->lock(dev);
783 
784 	if (udc_is_enabled(dev)) {
785 		ret = -EBUSY;
786 		goto udc_shutdown_error;
787 	}
788 
789 	if (!udc_is_initialized(dev)) {
790 		ret = -EALREADY;
791 		goto udc_shutdown_error;
792 	}
793 
794 	ret = api->shutdown(dev);
795 	atomic_clear_bit(&data->status, UDC_STATUS_INITIALIZED);
796 
797 udc_shutdown_error:
798 	api->unlock(dev);
799 
800 	return ret;
801 }
802 
803 static ALWAYS_INLINE
udc_ctrl_alloc_stage(const struct device * dev,struct net_buf * const parent,const uint8_t ep,const size_t size)804 struct net_buf *udc_ctrl_alloc_stage(const struct device *dev,
805 				     struct net_buf *const parent,
806 				     const uint8_t ep,
807 				     const size_t size)
808 {
809 	struct net_buf *buf;
810 
811 	buf = udc_ctrl_alloc(dev, ep, size);
812 	if (buf == NULL) {
813 		return NULL;
814 	}
815 
816 	if (parent) {
817 		net_buf_frag_add(parent, buf);
818 	}
819 
820 	return buf;
821 }
822 
udc_ctrl_alloc_data(const struct device * dev,struct net_buf * const setup,const uint8_t ep)823 static struct net_buf *udc_ctrl_alloc_data(const struct device *dev,
824 					   struct net_buf *const setup,
825 					   const uint8_t ep)
826 {
827 	size_t size = udc_data_stage_length(setup);
828 	struct udc_buf_info *bi;
829 	struct net_buf *buf;
830 
831 	buf = udc_ctrl_alloc_stage(dev, setup, ep, size);
832 	if (buf) {
833 		bi = udc_get_buf_info(buf);
834 		bi->data = true;
835 	}
836 
837 	return buf;
838 }
839 
udc_ctrl_alloc_status(const struct device * dev,struct net_buf * const parent,const uint8_t ep)840 static struct net_buf *udc_ctrl_alloc_status(const struct device *dev,
841 					     struct net_buf *const parent,
842 					     const uint8_t ep)
843 {
844 	size_t size = (ep == USB_CONTROL_EP_OUT) ? 64 : 0;
845 	struct udc_buf_info *bi;
846 	struct net_buf *buf;
847 
848 	buf = udc_ctrl_alloc_stage(dev, parent, ep, size);
849 	if (buf) {
850 		bi = udc_get_buf_info(buf);
851 		bi->status = true;
852 	}
853 
854 	return buf;
855 }
856 
/* Submit a completed Setup + Data OUT sequence to the higher layer.
 * The data-stage buffer is chained behind the stored setup buffer and an
 * IN status-stage buffer is appended; allocation failure is reported via
 * the event's error code, not as a direct return value.
 */
int udc_ctrl_submit_s_out_status(const struct device *dev,
			      struct net_buf *const dout)
{
	struct udc_buf_info *bi = udc_get_buf_info(dout);
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	bi->data = true;
	net_buf_frag_add(data->setup, dout);

	/* udc_ctrl_alloc_status() chains the new buffer behind dout. */
	buf = udc_ctrl_alloc_status(dev, dout, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
875 
/* Submit the setup buffer with an allocated Data IN stage buffer chained
 * behind it. Only valid while the control pipe is in the Data IN stage;
 * allocation failure is reported via the event's error code.
 */
int udc_ctrl_submit_s_in_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	if (!udc_ctrl_stage_is_data_in(dev)) {
		return -ENOTSUP;
	}

	/* Allocate buffer for data stage IN */
	buf = udc_ctrl_alloc_data(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
894 
udc_ctrl_submit_s_status(const struct device * dev)895 int udc_ctrl_submit_s_status(const struct device *dev)
896 {
897 	struct udc_data *data = dev->data;
898 	struct net_buf *buf;
899 	int ret = 0;
900 
901 	/* Allocate buffer for possible status IN */
902 	buf = udc_ctrl_alloc_status(dev, data->setup, USB_CONTROL_EP_IN);
903 	if (buf == NULL) {
904 		ret = -ENOMEM;
905 	}
906 
907 	return udc_submit_ep_event(dev, data->setup, ret);
908 }
909 
udc_ctrl_submit_status(const struct device * dev,struct net_buf * const buf)910 int udc_ctrl_submit_status(const struct device *dev,
911 			   struct net_buf *const buf)
912 {
913 	struct udc_buf_info *bi = udc_get_buf_info(buf);
914 
915 	bi->status = true;
916 
917 	return udc_submit_ep_event(dev, buf, 0);
918 }
919 
udc_ctrl_stage_is_data_out(const struct device * dev)920 bool udc_ctrl_stage_is_data_out(const struct device *dev)
921 {
922 	struct udc_data *data = dev->data;
923 
924 	return data->stage == CTRL_PIPE_STAGE_DATA_OUT ? true : false;
925 }
926 
udc_ctrl_stage_is_data_in(const struct device * dev)927 bool udc_ctrl_stage_is_data_in(const struct device *dev)
928 {
929 	struct udc_data *data = dev->data;
930 
931 	return data->stage == CTRL_PIPE_STAGE_DATA_IN ? true : false;
932 }
933 
udc_ctrl_stage_is_status_out(const struct device * dev)934 bool udc_ctrl_stage_is_status_out(const struct device *dev)
935 {
936 	struct udc_data *data = dev->data;
937 
938 	return data->stage == CTRL_PIPE_STAGE_STATUS_OUT ? true : false;
939 }
940 
udc_ctrl_stage_is_status_in(const struct device * dev)941 bool udc_ctrl_stage_is_status_in(const struct device *dev)
942 {
943 	struct udc_data *data = dev->data;
944 
945 	return data->stage == CTRL_PIPE_STAGE_STATUS_IN ? true : false;
946 }
947 
udc_ctrl_stage_is_no_data(const struct device * dev)948 bool udc_ctrl_stage_is_no_data(const struct device *dev)
949 {
950 	struct udc_data *data = dev->data;
951 
952 	return data->stage == CTRL_PIPE_STAGE_NO_DATA ? true : false;
953 }
954 
udc_data_stage_to_host(const struct net_buf * const buf)955 static bool udc_data_stage_to_host(const struct net_buf *const buf)
956 {
957 	struct usb_setup_packet *setup = (void *)buf->data;
958 
959 	return USB_REQTYPE_GET_DIR(setup->bmRequestType);
960 }
961 
/* Advance the control-pipe state machine after a transfer on endpoint 0
 * completes. The next stage is derived from the current stage, the
 * transfer direction, and -- for a setup packet -- wLength and the
 * direction bit (D7) of bmRequestType. Unexpected transitions land in
 * CTRL_PIPE_STAGE_ERROR.
 */
void udc_ctrl_update_stage(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_device_caps caps = udc_caps(dev);
	uint8_t next_stage = CTRL_PIPE_STAGE_ERROR;
	struct udc_data *data = dev->data;

	__ASSERT(USB_EP_GET_IDX(bi->ep) == 0,
		 "0x%02x is not a control endpoint", bi->ep);

	if (bi->setup && bi->ep == USB_CONTROL_EP_OUT) {
		uint16_t length  = udc_data_stage_length(buf);

		/* Remember the setup buffer; later stages are chained to
		 * it and submitted together.
		 */
		data->setup = buf;

		if (data->stage != CTRL_PIPE_STAGE_SETUP) {
			LOG_INF("Sequence %u not completed", data->stage);
			data->stage = CTRL_PIPE_STAGE_SETUP;
		}

		/*
		 * Setup Stage has been completed (setup packet received),
		 * regardless of the previous stage, this is now being reset.
		 * Next state depends on wLength and the direction bit (D7).
		 */
		if (length == 0) {
			/*
			 * No Data Stage, next is Status Stage
			 * complete sequence: s->status
			 */
			LOG_DBG("s->(status)");
			next_stage = CTRL_PIPE_STAGE_NO_DATA;
		} else if (udc_data_stage_to_host(buf)) {
			/*
			 * Next is Data Stage (to host / IN)
			 * complete sequence: s->in->status
			 */
			LOG_DBG("s->(in)");
			next_stage = CTRL_PIPE_STAGE_DATA_IN;
		} else {
			/*
			 * Next is Data Stage (to device / OUT)
			 * complete sequence: s->out->status
			 */
			LOG_DBG("s->(out)");
			next_stage = CTRL_PIPE_STAGE_DATA_OUT;
		}

	} else if (bi->ep == USB_CONTROL_EP_OUT) {
		if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) {
			/*
			 * Next sequence is Status Stage if request is okay,
			 * (IN ZLP status to host)
			 */
			next_stage = CTRL_PIPE_STAGE_STATUS_IN;
		} else if (data->stage == CTRL_PIPE_STAGE_STATUS_OUT) {
			/*
			 * End of a sequence: s->in->status,
			 * We should check the length here because we always
			 * submit a OUT request with the minimum length
			 * of the control endpoint.
			 */
			if (buf->len == 0) {
				LOG_DBG("s-in-status");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_WRN("ZLP expected");
				next_stage = CTRL_PIPE_STAGE_ERROR;
			}
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}

	} else { /* if (bi->ep == USB_CONTROL_EP_IN) */
		if (data->stage == CTRL_PIPE_STAGE_STATUS_IN) {
			/*
			 * End of a sequence: setup->out->in
			 */
			LOG_DBG("s-out-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else if (data->stage == CTRL_PIPE_STAGE_DATA_IN) {
			/*
			 * Data IN stage completed, next sequence
			 * is Status Stage (OUT ZLP status to device).
			 * over-engineered controllers can send status
			 * on their own, skip this state then.
			 */
			if (caps.out_ack) {
				LOG_DBG("s-in->[status]");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_DBG("s-in->(status)");
				next_stage = CTRL_PIPE_STAGE_STATUS_OUT;
			}
		} else if (data->stage == CTRL_PIPE_STAGE_NO_DATA) {
			/*
			 * End of a sequence (setup->in)
			 * Previous NO Data stage was completed and
			 * we confirmed it with an IN ZLP.
			 */
			LOG_DBG("s-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}
	}


	if (next_stage == data->stage) {
		LOG_WRN("State not changed!");
	}

	data->stage = next_stage;
}
1079 
#if defined(CONFIG_UDC_WORKQUEUE)
K_KERNEL_STACK_DEFINE(udc_work_q_stack, CONFIG_UDC_WORKQUEUE_STACK_SIZE);

struct k_work_q udc_work_q;

/* Bring up the dedicated UDC work-queue thread at POST_KERNEL. */
static int udc_work_q_init(void)
{
	k_work_queue_start(&udc_work_q,
			   udc_work_q_stack,
			   K_KERNEL_STACK_SIZEOF(udc_work_q_stack),
			   CONFIG_UDC_WORKQUEUE_PRIORITY, NULL);
	k_thread_name_set(&udc_work_q.thread, "udc_work_q");

	return 0;
}

SYS_INIT(udc_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
1099