1 /*
2  * Copyright (c) 2021-2022 Nordic Semiconductor ASA
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/init.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/net/buf.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/sys/__assert.h>
12 #include <zephyr/usb/usb_ch9.h>
13 #include <zephyr/drivers/usb/udc_buf.h>
14 #include "udc_common.h"
15 
16 #include <zephyr/logging/log.h>
17 #if defined(CONFIG_UDC_DRIVER_LOG_LEVEL)
18 #define UDC_COMMON_LOG_LEVEL CONFIG_UDC_DRIVER_LOG_LEVEL
19 #else
20 #define UDC_COMMON_LOG_LEVEL LOG_LEVEL_NONE
21 #endif
22 LOG_MODULE_REGISTER(udc, CONFIG_UDC_DRIVER_LOG_LEVEL);
23 
udc_pool_data_alloc(struct net_buf * const buf,size_t * const size,k_timeout_t timeout)24 static inline uint8_t *udc_pool_data_alloc(struct net_buf *const buf,
25 					   size_t *const size, k_timeout_t timeout)
26 {
27 	struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);
28 	struct k_heap *const pool = buf_pool->alloc->alloc_data;
29 	void *b;
30 
31 	*size = ROUND_UP(*size, UDC_BUF_GRANULARITY);
32 	b = k_heap_aligned_alloc(pool, UDC_BUF_ALIGN, *size, timeout);
33 	if (b == NULL) {
34 		*size = 0;
35 		return NULL;
36 	}
37 
38 	return b;
39 }
40 
/* Return a buffer data area to the k_heap backing the buffer's pool. */
static inline void udc_pool_data_unref(struct net_buf *buf, uint8_t *const data)
{
	struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);

	k_heap_free(buf_pool->alloc->alloc_data, data);
}
48 
/*
 * net_buf allocator callbacks backed by a k_heap; used by UDC buffer pools
 * that need aligned, granularity-rounded (DMA-able) data areas.
 */
const struct net_buf_data_cb net_buf_dma_cb = {
	.alloc = udc_pool_data_alloc,
	.unref = udc_pool_data_unref,
};

static inline void udc_buf_destroy(struct net_buf *buf);

/*
 * Common pool for endpoint transfer buffers. Each buffer carries a
 * struct udc_buf_info in its user-data area and is released through
 * udc_buf_destroy().
 */
UDC_BUF_POOL_VAR_DEFINE(udc_ep_pool,
			CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE,
			sizeof(struct udc_buf_info), udc_buf_destroy);

/*
 * Map an endpoint address to an index in the endpoint lookup table:
 * OUT endpoints occupy slots 0..15, IN endpoints slots 16..31.
 */
#define USB_EP_LUT_IDX(ep) (USB_EP_DIR_IS_IN(ep) ? (ep & BIT_MASK(4)) + 16 : \
						   ep & BIT_MASK(4))
62 
/*
 * Update the controller suspended status bit. Warns (but still applies the
 * value) when the new state equals the current one, i.e. on a spurious
 * suspend/resume event from the driver.
 */
void udc_set_suspended(const struct device *dev, const bool value)
{
	struct udc_data *data = dev->data;

	if (value == udc_is_suspended(dev)) {
		LOG_WRN("Spurious suspend/resume event");
	}

	atomic_set_bit_to(&data->status, UDC_STATUS_SUSPENDED, value);
}
73 
/*
 * Look up the endpoint configuration for endpoint address ep.
 * Returns NULL when no configuration was registered for that address.
 */
struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep)
{
	struct udc_data *data = dev->data;

	return data->ep_lut[USB_EP_LUT_IDX(ep)];
}
80 
/*
 * Return the busy flag of an endpoint. The endpoint must have been
 * registered; asserts otherwise.
 */
bool udc_ep_is_busy(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);

	return ep_cfg->stat.busy;
}
90 
/*
 * Set or clear the busy flag of an endpoint. The endpoint must have been
 * registered; asserts otherwise.
 */
void udc_ep_set_busy(const struct device *dev, const uint8_t ep, const bool busy)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);
	ep_cfg->stat.busy = busy;
}
99 
/*
 * Register an endpoint configuration in the lookup table and initialize
 * its transfer FIFO. Must be called by the driver before udc_init()
 * completes; fails with -EACCES once the controller is initialized.
 */
int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg)
{
	struct udc_data *data = dev->data;
	uint8_t idx;

	if (udc_is_initialized(dev)) {
		return -EACCES;
	}

	idx = USB_EP_LUT_IDX(cfg->addr);
	__ASSERT_NO_MSG(idx < ARRAY_SIZE(data->ep_lut));

	data->ep_lut[idx] = cfg;
	k_fifo_init(&cfg->fifo);

	return 0;
}
117 
udc_buf_get(const struct device * dev,const uint8_t ep)118 struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep)
119 {
120 	struct udc_ep_config *ep_cfg;
121 
122 	ep_cfg = udc_get_ep_cfg(dev, ep);
123 	if (ep_cfg == NULL) {
124 		return NULL;
125 	}
126 
127 	return net_buf_get(&ep_cfg->fifo, K_NO_WAIT);
128 }
129 
/*
 * Remove all buffers queued on an endpoint and chain them together through
 * the frags pointer, in FIFO order; the first buffer dequeued becomes the
 * head of the chain. Returns NULL when the endpoint does not exist or its
 * queue is empty.
 */
struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;
	struct net_buf *buf;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	buf = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	LOG_DBG("ep 0x%02x dequeue %p", ep, buf);
	/* Drain the FIFO, linking each dequeued buffer to the previous one. */
	for (struct net_buf *n = buf; !k_fifo_is_empty(&ep_cfg->fifo); n = n->frags) {
		n->frags = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
		LOG_DBG("|-> %p ", n->frags);
		if (n->frags == NULL) {
			break;
		}
	}

	return buf;
}
156 
/*
 * Return the first buffer queued on an endpoint without removing it, or
 * NULL when the endpoint does not exist or its queue is empty.
 */
struct net_buf *udc_buf_peek(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	return k_fifo_peek_head(&ep_cfg->fifo);
}
168 
udc_buf_put(struct udc_ep_config * const ep_cfg,struct net_buf * const buf)169 void udc_buf_put(struct udc_ep_config *const ep_cfg,
170 		 struct net_buf *const buf)
171 {
172 	net_buf_put(&ep_cfg->fifo, buf);
173 }
174 
/*
 * Mark a buffer as a setup-stage buffer. The data and status stage flags
 * are cleared since the flags are mutually exclusive.
 */
void udc_ep_buf_set_setup(struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->setup = 1;
	bi->data = 0;
	bi->status = 0;
}
183 
/* Return true if a trailing zero-length packet is requested for the buffer. */
bool udc_ep_buf_has_zlp(const struct net_buf *const buf)
{
	const struct udc_buf_info *bi = udc_get_buf_info(buf);

	return bi->zlp;
}
190 
/* Clear the zero-length-packet request flag of a buffer. */
void udc_ep_buf_clear_zlp(const struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->zlp = false;
}
197 
/*
 * Build a UDC event of the given type/status and hand it to the event
 * callback registered via udc_init(). Returns the callback's result.
 */
int udc_submit_event(const struct device *dev,
		     const enum udc_event_type type,
		     const int status)
{
	struct udc_data *data = dev->data;
	struct udc_event drv_evt = {
		.type = type,
		.status = status,
		.dev = dev,
	};

	return data->event_cb(dev, &drv_evt);
}
211 
/*
 * Submit an endpoint transfer completion event to the event callback.
 * The transfer result err is stored in the buffer's udc_buf_info.
 * Fails with -EPERM when the controller is not initialized.
 */
int udc_submit_ep_event(const struct device *dev,
			struct net_buf *const buf,
			const int err)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_data *data = dev->data;
	const struct udc_event drv_evt = {
		.type = UDC_EVT_EP_REQUEST,
		.buf = buf,
		.dev = dev,
	};

	if (!udc_is_initialized(dev)) {
		return -EPERM;
	}

	bi->err = err;

	return data->event_cb(dev, &drv_evt);
}
232 
/* Extract the transfer type bits from an endpoint bmAttributes value. */
static uint8_t ep_attrib_get_transfer(uint8_t attributes)
{
	return attributes & USB_EP_TRANSFER_TYPE_MASK;
}
237 
/*
 * Validate a requested endpoint configuration (direction, maximum packet
 * size, transfer type) against the capabilities of the hardware endpoint.
 * Returns true when the configuration is supported.
 *
 * NOTE(review): dev and interval are currently unused here — presumably
 * reserved for future checks (e.g. interval validation); confirm intent.
 */
static bool ep_check_config(const struct device *dev,
			    const struct udc_ep_config *const cfg,
			    const uint8_t ep,
			    const uint8_t attributes,
			    const uint16_t mps,
			    const uint8_t interval)
{
	bool dir_is_in = USB_EP_DIR_IS_IN(ep);
	bool dir_is_out = USB_EP_DIR_IS_OUT(ep);

	LOG_DBG("cfg d:%c|%c t:%c|%c|%c|%c, mps %u",
		cfg->caps.in ? 'I' : '-',
		cfg->caps.out ? 'O' : '-',
		cfg->caps.iso ? 'S' : '-',
		cfg->caps.bulk ? 'B' : '-',
		cfg->caps.interrupt ? 'I' : '-',
		cfg->caps.control ? 'C' : '-',
		cfg->caps.mps);

	/* Requested direction must be supported by the endpoint. */
	if (dir_is_out && !cfg->caps.out) {
		return false;
	}

	if (dir_is_in && !cfg->caps.in) {
		return false;
	}

	/* Requested MPS must not exceed the endpoint's capability. */
	if (mps > cfg->caps.mps) {
		return false;
	}

	/* Requested transfer type must be supported by the endpoint. */
	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		if (!cfg->caps.bulk) {
			return false;
		}
		break;
	case USB_EP_TYPE_INTERRUPT:
		if (!cfg->caps.interrupt) {
			return false;
		}
		break;
	case USB_EP_TYPE_ISO:
		if (!cfg->caps.iso) {
			return false;
		}
		break;
	case USB_EP_TYPE_CONTROL:
		if (!cfg->caps.control) {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
296 
/*
 * Pick a maximum packet size for bulk or interrupt endpoints when the
 * caller requested MPS 0 ("any"): the lesser of the endpoint capability
 * and the USB spec limit for the controller speed. Control and ISO
 * endpoints are left unchanged.
 */
static void ep_update_mps(const struct device *dev,
			  const struct udc_ep_config *const cfg,
			  const uint8_t attributes,
			  uint16_t *const mps)
{
	struct udc_device_caps caps = udc_caps(dev);
	const uint16_t spec_int_mps = caps.hs ? 1024 : 64;
	const uint16_t spec_bulk_mps = caps.hs ? 512 : 64;

	/*
	 * TODO: It does not take into account the actual speed of the
	 * bus after the RESET. Should be fixed/improved when the driver
	 * for high speed controller are ported.
	 */
	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		*mps = MIN(cfg->caps.mps, spec_bulk_mps);
		break;
	case USB_EP_TYPE_INTERRUPT:
		*mps = MIN(cfg->caps.mps, spec_int_mps);
		break;
	case USB_EP_TYPE_CONTROL:
		__fallthrough;
	case USB_EP_TYPE_ISO:
		__fallthrough;
	default:
		return;
	}
}
326 
/*
 * Check whether an endpoint configuration is supported by the hardware.
 * If the caller passes *mps == 0, a suitable maximum packet size is
 * written back through mps. Returns 0 on success, -ENODEV when the
 * endpoint does not exist, -ENOTSUP when the configuration is rejected.
 */
int udc_ep_try_config(const struct device *dev,
		      const uint8_t ep,
		      const uint8_t attributes,
		      uint16_t *const mps,
		      const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *ep_cfg;
	bool ok;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return -ENODEV;
	}

	api->lock(dev);

	ok = ep_check_config(dev, ep_cfg, ep, attributes, *mps, interval);
	if (ok && *mps == 0U) {
		ep_update_mps(dev, ep_cfg, attributes, mps);
	}

	api->unlock(dev);

	return ok ? 0 : -ENOTSUP;
}
353 
/*
 * Validate and enable an endpoint. Used by drivers for the control
 * endpoints and by udc_ep_enable() for all others; the caller is expected
 * to hold the controller lock. Resets the data toggle, halt and odd-frame
 * state before calling the driver's ep_enable op; the enabled flag is set
 * only when the driver call succeeds.
 */
int udc_ep_enable_internal(const struct device *dev,
			   const uint8_t ep,
			   const uint8_t attributes,
			   const uint16_t mps,
			   const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already enabled", cfg->addr);
		return -EALREADY;
	}

	if (!ep_check_config(dev, cfg, ep, attributes, mps, interval)) {
		LOG_ERR("Endpoint 0x%02x validation failed", cfg->addr);
		return -ENODEV;
	}

	cfg->attributes = attributes;
	cfg->mps = mps;
	cfg->interval = interval;

	/* Start from a clean toggle/halt state for the new configuration. */
	cfg->stat.odd = 0;
	cfg->stat.halted = 0;
	cfg->stat.data1 = false;
	ret = api->ep_enable(dev, cfg);
	cfg->stat.enabled = ret ? false : true;

	return ret;
}
391 
udc_ep_enable(const struct device * dev,const uint8_t ep,const uint8_t attributes,const uint16_t mps,const uint8_t interval)392 int udc_ep_enable(const struct device *dev,
393 		  const uint8_t ep,
394 		  const uint8_t attributes,
395 		  const uint16_t mps,
396 		  const uint8_t interval)
397 {
398 	const struct udc_api *api = dev->api;
399 	int ret;
400 
401 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
402 		return -EINVAL;
403 	}
404 
405 	api->lock(dev);
406 
407 	if (!udc_is_enabled(dev)) {
408 		ret = -EPERM;
409 		goto ep_enable_error;
410 	}
411 
412 	ret = udc_ep_enable_internal(dev, ep, attributes, mps, interval);
413 
414 ep_enable_error:
415 	api->unlock(dev);
416 
417 	return ret;
418 }
419 
udc_ep_disable_internal(const struct device * dev,const uint8_t ep)420 int udc_ep_disable_internal(const struct device *dev, const uint8_t ep)
421 {
422 	const struct udc_api *api = dev->api;
423 	struct udc_ep_config *cfg;
424 	int ret;
425 
426 	cfg = udc_get_ep_cfg(dev, ep);
427 	if (cfg == NULL) {
428 		return -ENODEV;
429 	}
430 
431 	if (!cfg->stat.enabled) {
432 		LOG_ERR("ep 0x%02x already disabled", cfg->addr);
433 		return -EALREADY;
434 	}
435 
436 	ret = api->ep_disable(dev, cfg);
437 	cfg->stat.enabled = ret ? cfg->stat.enabled : false;
438 
439 	return ret;
440 }
441 
udc_ep_disable(const struct device * dev,const uint8_t ep)442 int udc_ep_disable(const struct device *dev, const uint8_t ep)
443 {
444 	const struct udc_api *api = dev->api;
445 	int ret;
446 
447 	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
448 		return -EINVAL;
449 	}
450 
451 	api->lock(dev);
452 
453 	if (!udc_is_initialized(dev)) {
454 		ret = -EPERM;
455 		goto ep_disable_error;
456 	}
457 
458 	ret = udc_ep_disable_internal(dev, ep);
459 
460 ep_disable_error:
461 	api->unlock(dev);
462 
463 	return ret;
464 }
465 
/*
 * Set the halt (STALL) condition on an enabled endpoint. Not supported
 * for isochronous endpoints (-ENOTSUP). The controller must be enabled
 * and the endpoint registered and enabled.
 *
 * NOTE(review): stat.halted is not updated here, unlike the clear path in
 * udc_ep_clear_halt() — presumably the driver's ep_set_halt op sets it;
 * confirm against driver implementations.
 */
int udc_ep_set_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_set_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_set_halt_error;
	}

	ret = api->ep_set_halt(dev, cfg);

ep_set_halt_error:
	api->unlock(dev);

	return ret;
}
502 
/*
 * Clear the halt (STALL) condition on an enabled endpoint. Not supported
 * for isochronous endpoints (-ENOTSUP). On success the endpoint's halted
 * flag is cleared.
 */
int udc_ep_clear_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_clear_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_clear_halt_error;
	}

	ret = api->ep_clear_halt(dev, cfg);
	if (ret == 0) {
		cfg->stat.halted = false;
	}

ep_clear_halt_error:
	api->unlock(dev);

	return ret;
}
542 
/*
 * Debug aid: log all buffers currently queued on an endpoint without
 * removing them. The k_fifo is viewed through a sys_slist_t so queued
 * buffers can be iterated in place (relies on k_fifo being slist-backed).
 */
static void udc_debug_ep_enqueue(const struct device *dev,
				 struct udc_ep_config *const cfg)
{
	struct udc_buf_info *bi;
	struct net_buf *buf;
	sys_slist_t list;

	list.head = k_fifo_peek_head(&cfg->fifo);
	list.tail = k_fifo_peek_tail(&cfg->fifo);
	if (list.head == NULL) {
		LOG_DBG("ep 0x%02x queue is empty", cfg->addr);
		return;
	}

	LOG_DBG("[de]queue ep 0x%02x:", cfg->addr);

	SYS_SLIST_FOR_EACH_CONTAINER(&list, buf, node) {
		/* NOTE(review): bi is fetched but not used in the log line. */
		bi = udc_get_buf_info(buf);
		LOG_DBG("|-> %p (%u) ->", buf, buf->size);
	}
}
564 
/*
 * Queue a transfer buffer to an endpoint. The target endpoint address is
 * taken from the buffer's udc_buf_info. Enqueueing to the control OUT
 * endpoint is not permitted (-EPERM); the controller must be enabled.
 * The setup flag is cleared before the buffer is handed to the driver.
 */
int udc_ep_enqueue(const struct device *dev, struct net_buf *const buf)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	struct udc_buf_info *bi;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	bi = udc_get_buf_info(buf);
	if (bi->ep == USB_CONTROL_EP_OUT) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	cfg = udc_get_ep_cfg(dev, bi->ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	LOG_DBG("Queue ep 0x%02x %p len %u", cfg->addr, buf,
		USB_EP_DIR_IS_IN(cfg->addr) ? buf->len : buf->size);

	bi->setup = 0;
	ret = api->ep_enqueue(dev, cfg, buf);

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}
602 
/*
 * Ask the driver to remove all pending transfers from an endpoint.
 * The controller must be initialized; the endpoint should normally be
 * disabled or halted first (only logged, not enforced). Returns 0
 * immediately when the queue is already empty.
 */
int udc_ep_dequeue(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_dequeue_error;
	}

	if (cfg->stat.enabled || cfg->stat.halted) {
		LOG_INF("ep 0x%02x is not halted|disabled", cfg->addr);
	}

	if (UDC_COMMON_LOG_LEVEL == LOG_LEVEL_DBG) {
		udc_debug_ep_enqueue(dev, cfg);
	}

	if (k_fifo_is_empty(&cfg->fifo)) {
		ret = 0;
	} else  {
		ret = api->ep_dequeue(dev, cfg);
	}

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}
641 
udc_ep_buf_alloc(const struct device * dev,const uint8_t ep,const size_t size)642 struct net_buf *udc_ep_buf_alloc(const struct device *dev,
643 				 const uint8_t ep,
644 				 const size_t size)
645 {
646 	const struct udc_api *api = dev->api;
647 	struct net_buf *buf = NULL;
648 	struct udc_buf_info *bi;
649 
650 	api->lock(dev);
651 
652 	buf = net_buf_alloc_len(&udc_ep_pool, size, K_NO_WAIT);
653 	if (!buf) {
654 		LOG_ERR("Failed to allocate net_buf %zd", size);
655 		goto ep_alloc_error;
656 	}
657 
658 	bi = udc_get_buf_info(buf);
659 	memset(bi, 0, sizeof(struct udc_buf_info));
660 	bi->ep = ep;
661 	LOG_DBG("Allocate net_buf, ep 0x%02x, size %zd", ep, size);
662 
663 ep_alloc_error:
664 	api->unlock(dev);
665 
666 	return buf;
667 }
668 
/*
 * Allocate a buffer for a control transfer stage. Currently identical to
 * udc_ep_buf_alloc(); kept as a separate entry point for control-specific
 * allocation policy later.
 */
struct net_buf *udc_ctrl_alloc(const struct device *dev,
			       const uint8_t ep,
			       const size_t size)
{
	/* TODO: for now just pass to udc_buf_alloc() */
	return udc_ep_buf_alloc(dev, ep, size);
}
676 
/* Pool destroy callback: log and release the buffer back to its pool. */
static inline void udc_buf_destroy(struct net_buf *buf)
{
	/* Adjust level and use together with the log in udc_ep_buf_alloc() */
	LOG_DBG("destroy %p", buf);
	net_buf_destroy(buf);
}
683 
udc_ep_buf_free(const struct device * dev,struct net_buf * const buf)684 int udc_ep_buf_free(const struct device *dev, struct net_buf *const buf)
685 {
686 	const struct udc_api *api = dev->api;
687 	int ret = 0;
688 
689 	api->lock(dev);
690 	net_buf_unref(buf);
691 	api->unlock(dev);
692 
693 	return ret;
694 }
695 
udc_device_speed(const struct device * dev)696 enum udc_bus_speed udc_device_speed(const struct device *dev)
697 {
698 	const struct udc_api *api = dev->api;
699 	enum udc_bus_speed speed = UDC_BUS_UNKNOWN;
700 
701 	api->lock(dev);
702 
703 	if (!udc_is_enabled(dev)) {
704 		goto device_speed_error;
705 	}
706 
707 	if (api->device_speed) {
708 		speed = api->device_speed(dev);
709 	} else {
710 		/* TODO: Shall we track connected status in UDC? */
711 		speed = UDC_BUS_SPEED_FS;
712 	}
713 
714 device_speed_error:
715 	api->unlock(dev);
716 
717 	return speed;
718 }
719 
udc_enable(const struct device * dev)720 int udc_enable(const struct device *dev)
721 {
722 	const struct udc_api *api = dev->api;
723 	struct udc_data *data = dev->data;
724 	int ret;
725 
726 	api->lock(dev);
727 
728 	if (!udc_is_initialized(dev)) {
729 		ret = -EPERM;
730 		goto udc_enable_error;
731 	}
732 
733 	if (udc_is_enabled(dev)) {
734 		ret = -EALREADY;
735 		goto udc_enable_error;
736 	}
737 
738 	data->stage = CTRL_PIPE_STAGE_SETUP;
739 
740 	ret = api->enable(dev);
741 	if (ret == 0) {
742 		atomic_set_bit(&data->status, UDC_STATUS_ENABLED);
743 	}
744 
745 udc_enable_error:
746 	api->unlock(dev);
747 
748 	return ret;
749 }
750 
udc_disable(const struct device * dev)751 int udc_disable(const struct device *dev)
752 {
753 	const struct udc_api *api = dev->api;
754 	struct udc_data *data = dev->data;
755 	int ret;
756 
757 	api->lock(dev);
758 
759 	if (!udc_is_enabled(dev)) {
760 		ret = -EALREADY;
761 		goto udc_disable_error;
762 	}
763 
764 	ret = api->disable(dev);
765 	atomic_clear_bit(&data->status, UDC_STATUS_ENABLED);
766 
767 udc_disable_error:
768 	api->unlock(dev);
769 
770 	return ret;
771 }
772 
/*
 * Initialize the controller and register the application event callback.
 * The callback is mandatory (-EINVAL). Fails with -EALREADY when the
 * controller is already initialized; the initialized status bit is set
 * only when the driver's init op succeeds.
 */
int udc_init(const struct device *dev, udc_event_cb_t event_cb)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	if (event_cb == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (udc_is_initialized(dev)) {
		ret = -EALREADY;
	} else {
		data->event_cb = event_cb;

		ret = api->init(dev);
		if (ret == 0) {
			atomic_set_bit(&data->status, UDC_STATUS_INITIALIZED);
		}
	}

	api->unlock(dev);

	return ret;
}
802 
udc_shutdown(const struct device * dev)803 int udc_shutdown(const struct device *dev)
804 {
805 	const struct udc_api *api = dev->api;
806 	struct udc_data *data = dev->data;
807 	int ret;
808 
809 	api->lock(dev);
810 
811 	if (udc_is_enabled(dev)) {
812 		ret = -EBUSY;
813 		goto udc_shutdown_error;
814 	}
815 
816 	if (!udc_is_initialized(dev)) {
817 		ret = -EALREADY;
818 		goto udc_shutdown_error;
819 	}
820 
821 	ret = api->shutdown(dev);
822 	atomic_clear_bit(&data->status, UDC_STATUS_INITIALIZED);
823 
824 udc_shutdown_error:
825 	api->unlock(dev);
826 
827 	return ret;
828 }
829 
830 static ALWAYS_INLINE
udc_ctrl_alloc_stage(const struct device * dev,struct net_buf * const parent,const uint8_t ep,const size_t size)831 struct net_buf *udc_ctrl_alloc_stage(const struct device *dev,
832 				     struct net_buf *const parent,
833 				     const uint8_t ep,
834 				     const size_t size)
835 {
836 	struct net_buf *buf;
837 
838 	buf = udc_ctrl_alloc(dev, ep, size);
839 	if (buf == NULL) {
840 		return NULL;
841 	}
842 
843 	if (parent) {
844 		net_buf_frag_add(parent, buf);
845 	}
846 
847 	return buf;
848 }
849 
udc_ctrl_alloc_data(const struct device * dev,struct net_buf * const setup,const uint8_t ep)850 static struct net_buf *udc_ctrl_alloc_data(const struct device *dev,
851 					   struct net_buf *const setup,
852 					   const uint8_t ep)
853 {
854 	size_t size = udc_data_stage_length(setup);
855 	struct udc_buf_info *bi;
856 	struct net_buf *buf;
857 
858 	buf = udc_ctrl_alloc_stage(dev, setup, ep, size);
859 	if (buf) {
860 		bi = udc_get_buf_info(buf);
861 		bi->data = true;
862 	}
863 
864 	return buf;
865 }
866 
udc_ctrl_alloc_status(const struct device * dev,struct net_buf * const parent,const uint8_t ep)867 static struct net_buf *udc_ctrl_alloc_status(const struct device *dev,
868 					     struct net_buf *const parent,
869 					     const uint8_t ep)
870 {
871 	size_t size = (ep == USB_CONTROL_EP_OUT) ? 64 : 0;
872 	struct udc_buf_info *bi;
873 	struct net_buf *buf;
874 
875 	buf = udc_ctrl_alloc_stage(dev, parent, ep, size);
876 	if (buf) {
877 		bi = udc_get_buf_info(buf);
878 		bi->status = true;
879 	}
880 
881 	return buf;
882 }
883 
/*
 * Submit a setup->out->status sequence to the application: mark dout as
 * the data stage, chain it to the stored setup buffer, append a status IN
 * buffer, and submit the whole chain as a single endpoint event. When the
 * status buffer cannot be allocated, the event carries -ENOMEM.
 */
int udc_ctrl_submit_s_out_status(const struct device *dev,
			      struct net_buf *const dout)
{
	struct udc_buf_info *bi = udc_get_buf_info(dout);
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	bi->data = true;
	net_buf_frag_add(data->setup, dout);

	buf = udc_ctrl_alloc_status(dev, dout, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
902 
/*
 * Submit a setup->in sequence to the application: allocate the data-stage
 * IN buffer, chain it to the stored setup buffer, and submit the chain as
 * one endpoint event. Only valid while the stage tracker is in the
 * data IN stage (-ENOTSUP otherwise); allocation failure is reported
 * through the event's -ENOMEM status.
 */
int udc_ctrl_submit_s_in_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	if (!udc_ctrl_stage_is_data_in(dev)) {
		return -ENOTSUP;
	}

	/* Allocate buffer for data stage IN */
	buf = udc_ctrl_alloc_data(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
921 
/*
 * Submit a no-data setup sequence to the application: chain a status IN
 * buffer to the stored setup buffer and submit it as one endpoint event.
 * Allocation failure is reported through the event's -ENOMEM status.
 */
int udc_ctrl_submit_s_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	/* Allocate buffer for possible status IN */
	buf = udc_ctrl_alloc_status(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}
936 
/*
 * Mark a buffer as a status-stage buffer and submit it to the
 * application as an endpoint event.
 */
int udc_ctrl_submit_status(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->status = true;

	return udc_submit_ep_event(dev, buf, 0);
}
946 
udc_ctrl_stage_is_data_out(const struct device * dev)947 bool udc_ctrl_stage_is_data_out(const struct device *dev)
948 {
949 	struct udc_data *data = dev->data;
950 
951 	return data->stage == CTRL_PIPE_STAGE_DATA_OUT ? true : false;
952 }
953 
udc_ctrl_stage_is_data_in(const struct device * dev)954 bool udc_ctrl_stage_is_data_in(const struct device *dev)
955 {
956 	struct udc_data *data = dev->data;
957 
958 	return data->stage == CTRL_PIPE_STAGE_DATA_IN ? true : false;
959 }
960 
udc_ctrl_stage_is_status_out(const struct device * dev)961 bool udc_ctrl_stage_is_status_out(const struct device *dev)
962 {
963 	struct udc_data *data = dev->data;
964 
965 	return data->stage == CTRL_PIPE_STAGE_STATUS_OUT ? true : false;
966 }
967 
udc_ctrl_stage_is_status_in(const struct device * dev)968 bool udc_ctrl_stage_is_status_in(const struct device *dev)
969 {
970 	struct udc_data *data = dev->data;
971 
972 	return data->stage == CTRL_PIPE_STAGE_STATUS_IN ? true : false;
973 }
974 
udc_ctrl_stage_is_no_data(const struct device * dev)975 bool udc_ctrl_stage_is_no_data(const struct device *dev)
976 {
977 	struct udc_data *data = dev->data;
978 
979 	return data->stage == CTRL_PIPE_STAGE_NO_DATA ? true : false;
980 }
981 
/*
 * True when the setup packet in buf requests a device-to-host (IN) data
 * stage, i.e. the direction bit of bmRequestType is set.
 */
static bool udc_data_stage_to_host(const struct net_buf *const buf)
{
	struct usb_setup_packet *setup = (void *)buf->data;

	return USB_REQTYPE_GET_DIR(setup->bmRequestType);
}
988 
/*
 * Control transfer stage-tracking state machine. Called for each
 * completed control endpoint buffer; advances data->stage based on the
 * current stage, the buffer's endpoint and setup flag, the setup
 * packet's wLength and direction bit, and the controller's out_ack
 * capability. On an inconsistent sequence the stage is set to
 * CTRL_PIPE_STAGE_ERROR. Must only be called for control endpoint
 * buffers (asserted).
 */
void udc_ctrl_update_stage(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_device_caps caps = udc_caps(dev);
	uint8_t next_stage = CTRL_PIPE_STAGE_ERROR;
	struct udc_data *data = dev->data;

	__ASSERT(USB_EP_GET_IDX(bi->ep) == 0,
		 "0x%02x is not a control endpoint", bi->ep);

	if (bi->setup && bi->ep == USB_CONTROL_EP_OUT) {
		uint16_t length  = udc_data_stage_length(buf);

		/* Remember the setup buffer for the submit helpers. */
		data->setup = buf;

		if (data->stage != CTRL_PIPE_STAGE_SETUP) {
			LOG_INF("Sequence %u not completed", data->stage);
			data->stage = CTRL_PIPE_STAGE_SETUP;
		}

		/*
		 * Setup Stage has been completed (setup packet received),
		 * regardless of the previous stage, this is now being reset.
		 * Next state depends on wLength and the direction bit (D7).
		 */
		if (length == 0) {
			/*
			 * No Data Stage, next is Status Stage
			 * complete sequence: s->status
			 */
			LOG_DBG("s->(status)");
			next_stage = CTRL_PIPE_STAGE_NO_DATA;
		} else if (udc_data_stage_to_host(buf)) {
			/*
			 * Next is Data Stage (to host / IN)
			 * complete sequence: s->in->status
			 */
			LOG_DBG("s->(in)");
			next_stage = CTRL_PIPE_STAGE_DATA_IN;
		} else {
			/*
			 * Next is Data Stage (to device / OUT)
			 * complete sequence: s->out->status
			 */
			LOG_DBG("s->(out)");
			next_stage = CTRL_PIPE_STAGE_DATA_OUT;
		}

	} else if (bi->ep == USB_CONTROL_EP_OUT) {
		if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) {
			/*
			 * Next sequence is Status Stage if request is okay,
			 * (IN ZLP status to host)
			 */
			next_stage = CTRL_PIPE_STAGE_STATUS_IN;
		} else if (data->stage == CTRL_PIPE_STAGE_STATUS_OUT) {
			/*
			 * End of a sequence: s->in->status,
			 * We should check the length here because we always
			 * submit a OUT request with the minimum length
			 * of the control endpoint.
			 */
			if (buf->len == 0) {
				LOG_DBG("s-in-status");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_WRN("ZLP expected");
				next_stage = CTRL_PIPE_STAGE_ERROR;
			}
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}

	} else { /* if (bi->ep == USB_CONTROL_EP_IN) */
		if (data->stage == CTRL_PIPE_STAGE_STATUS_IN) {
			/*
			 * End of a sequence: setup->out->in
			 */
			LOG_DBG("s-out-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else if (data->stage == CTRL_PIPE_STAGE_DATA_IN) {
			/*
			 * Data IN stage completed, next sequence
			 * is Status Stage (OUT ZLP status to device).
			 * over-engineered controllers can send status
			 * on their own, skip this state then.
			 */
			if (caps.out_ack) {
				LOG_DBG("s-in->[status]");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_DBG("s-in->(status)");
				next_stage = CTRL_PIPE_STAGE_STATUS_OUT;
			}
		} else if (data->stage == CTRL_PIPE_STAGE_NO_DATA) {
			/*
			 * End of a sequence (setup->in)
			 * Previous NO Data stage was completed and
			 * we confirmed it with an IN ZLP.
			 */
			LOG_DBG("s-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}
	}


	if (next_stage == data->stage) {
		LOG_WRN("State not changed!");
	}

	data->stage = next_stage;
}
1106 
1107 #if defined(CONFIG_UDC_WORKQUEUE)
1108 K_KERNEL_STACK_DEFINE(udc_work_q_stack, CONFIG_UDC_WORKQUEUE_STACK_SIZE);
1109 
1110 struct k_work_q udc_work_q;
1111 
/*
 * Start the dedicated UDC work queue thread; registered below via
 * SYS_INIT to run at POST_KERNEL.
 */
static int udc_work_q_init(void)
{

	k_work_queue_start(&udc_work_q,
			   udc_work_q_stack,
			   K_KERNEL_STACK_SIZEOF(udc_work_q_stack),
			   CONFIG_UDC_WORKQUEUE_PRIORITY, NULL);
	k_thread_name_set(&udc_work_q.thread, "udc_work_q");

	return 0;
}
1123 
1124 SYS_INIT(udc_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
1125 #endif
1126