/*
 * Copyright (c) 2021-2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/usb/usb_ch9.h>
#include <zephyr/drivers/usb/udc_buf.h>
#include "udc_common.h"

#include <zephyr/logging/log.h>
#if defined(CONFIG_UDC_DRIVER_LOG_LEVEL)
#define UDC_COMMON_LOG_LEVEL CONFIG_UDC_DRIVER_LOG_LEVEL
#else
#define UDC_COMMON_LOG_LEVEL LOG_LEVEL_NONE
#endif
LOG_MODULE_REGISTER(udc, UDC_COMMON_LOG_LEVEL);
static inline uint8_t *udc_pool_data_alloc(struct net_buf *const buf,
					   size_t *const size, k_timeout_t timeout)
{
	struct net_buf_pool *const buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *const pool = buf_pool->alloc->alloc_data;
	void *b;

	*size = ROUND_UP(*size, UDC_BUF_GRANULARITY);
	b = k_heap_aligned_alloc(pool, UDC_BUF_ALIGN, *size, timeout);
	if (b == NULL) {
		*size = 0;
		return NULL;
	}

	return b;
}

static inline void udc_pool_data_unref(struct net_buf *buf, uint8_t *const data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;

	k_heap_free(pool, data);
}

const struct net_buf_data_cb net_buf_dma_cb = {
	.alloc = udc_pool_data_alloc,
	.unref = udc_pool_data_unref,
};
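
/*
 * Note: with the callbacks above, buffer data is carved out of a
 * driver-supplied k_heap rather than a fixed net_buf data area, so
 * every allocation can honor the UDC_BUF_ALIGN and UDC_BUF_GRANULARITY
 * DMA constraints. The endpoint buffer pool defined below is expected
 * to route its data allocations through these callbacks.
 */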

static inline void udc_buf_destroy(struct net_buf *buf);

UDC_BUF_POOL_VAR_DEFINE(udc_ep_pool,
			CONFIG_UDC_BUF_COUNT, CONFIG_UDC_BUF_POOL_SIZE,
			sizeof(struct udc_buf_info), udc_buf_destroy);

#define USB_EP_LUT_IDX(ep) (USB_EP_DIR_IS_IN(ep) ? ((ep) & BIT_MASK(4)) + 16 : \
			    (ep) & BIT_MASK(4))
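
/*
 * The endpoint lookup table holds OUT endpoint configurations at
 * indices 0..15 and IN endpoint configurations at indices 16..31.
 * For example, USB_EP_LUT_IDX(0x01) == 1 (OUT ep 1) and
 * USB_EP_LUT_IDX(0x81) == 17 (IN ep 1).
 */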

void udc_set_suspended(const struct device *dev, const bool value)
{
	struct udc_data *data = dev->data;

	if (value == udc_is_suspended(dev)) {
		LOG_WRN("Spurious suspend/resume event");
	}

	atomic_set_bit_to(&data->status, UDC_STATUS_SUSPENDED, value);
}

struct udc_ep_config *udc_get_ep_cfg(const struct device *dev, const uint8_t ep)
{
	struct udc_data *data = dev->data;

	return data->ep_lut[USB_EP_LUT_IDX(ep)];
}

bool udc_ep_is_busy(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);

	return ep_cfg->stat.busy;
}

void udc_ep_set_busy(const struct device *dev, const uint8_t ep, const bool busy)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	__ASSERT(ep_cfg != NULL, "ep 0x%02x is not available", ep);
	ep_cfg->stat.busy = busy;
}

int udc_register_ep(const struct device *dev, struct udc_ep_config *const cfg)
{
	struct udc_data *data = dev->data;
	uint8_t idx;

	if (udc_is_initialized(dev)) {
		return -EACCES;
	}

	idx = USB_EP_LUT_IDX(cfg->addr);
	__ASSERT_NO_MSG(idx < ARRAY_SIZE(data->ep_lut));

	data->ep_lut[idx] = cfg;
	k_fifo_init(&cfg->fifo);

	return 0;
}
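
/*
 * Sketch of typical driver usage (illustrative only; the driver name
 * and the ep_cfg_out array are hypothetical, not part of this API):
 *
 *	static int hypothetical_driver_init(const struct device *dev)
 *	{
 *		struct hypothetical_data *priv = udc_get_private(dev);
 *		int err;
 *
 *		for (int i = 0; i < ARRAY_SIZE(priv->ep_cfg_out); i++) {
 *			priv->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i;
 *			err = udc_register_ep(dev, &priv->ep_cfg_out[i]);
 *			if (err != 0) {
 *				return err;
 *			}
 *		}
 *		...
 *	}
 *
 * Registration must happen before udc_init() marks the controller
 * initialized; afterwards udc_register_ep() returns -EACCES.
 */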

struct net_buf *udc_buf_get(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	return k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
}

struct net_buf *udc_buf_get_all(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;
	struct net_buf *buf;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	buf = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	LOG_DBG("ep 0x%02x dequeue %p", ep, buf);
	for (struct net_buf *n = buf; !k_fifo_is_empty(&ep_cfg->fifo); n = n->frags) {
		n->frags = k_fifo_get(&ep_cfg->fifo, K_NO_WAIT);
		LOG_DBG("|-> %p", n->frags);
		if (n->frags == NULL) {
			break;
		}
	}

	return buf;
}
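
/*
 * Note: udc_buf_get_all() drains the whole endpoint FIFO and chains
 * any remaining buffers to the first one via the frags pointers, so
 * the caller receives a single buffer chain representing the entire
 * queue.
 */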

struct net_buf *udc_buf_peek(const struct device *dev, const uint8_t ep)
{
	struct udc_ep_config *ep_cfg;

	ep_cfg = udc_get_ep_cfg(dev, ep);
	if (ep_cfg == NULL) {
		return NULL;
	}

	return k_fifo_peek_head(&ep_cfg->fifo);
}

void udc_buf_put(struct udc_ep_config *const ep_cfg,
		 struct net_buf *const buf)
{
	k_fifo_put(&ep_cfg->fifo, buf);
}

void udc_ep_buf_set_setup(struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->setup = 1;
	bi->data = 0;
	bi->status = 0;
}

bool udc_ep_buf_has_zlp(const struct net_buf *const buf)
{
	const struct udc_buf_info *bi = udc_get_buf_info(buf);

	return bi->zlp;
}

void udc_ep_buf_clear_zlp(const struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->zlp = false;
}

int udc_submit_event(const struct device *dev,
		     const enum udc_event_type type,
		     const int status)
{
	struct udc_data *data = dev->data;
	struct udc_event drv_evt = {
		.type = type,
		.status = status,
		.dev = dev,
	};

	return data->event_cb(dev, &drv_evt);
}

int udc_submit_ep_event(const struct device *dev,
			struct net_buf *const buf,
			const int err)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_data *data = dev->data;
	const struct udc_event drv_evt = {
		.type = UDC_EVT_EP_REQUEST,
		.buf = buf,
		.dev = dev,
	};

	if (!udc_is_initialized(dev)) {
		return -EPERM;
	}

	bi->err = err;

	return data->event_cb(dev, &drv_evt);
}
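
/*
 * Sketch of the expected completion path in a controller driver
 * (illustrative only): on transfer completion, typically in ISR or
 * driver workqueue context, the driver dequeues the buffer and hands
 * it back to the stack together with the transfer result:
 *
 *	struct net_buf *buf = udc_buf_get(dev, ep);
 *
 *	if (buf != NULL) {
 *		udc_submit_ep_event(dev, buf, 0);
 *	}
 */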

static uint8_t ep_attrib_get_transfer(uint8_t attributes)
{
	return attributes & USB_EP_TRANSFER_TYPE_MASK;
}

static bool ep_check_config(const struct device *dev,
			    const struct udc_ep_config *const cfg,
			    const uint8_t ep,
			    const uint8_t attributes,
			    const uint16_t mps,
			    const uint8_t interval)
{
	bool dir_is_in = USB_EP_DIR_IS_IN(ep);
	bool dir_is_out = USB_EP_DIR_IS_OUT(ep);

	LOG_DBG("cfg d:%c|%c t:%c|%c|%c|%c, mps %u",
		cfg->caps.in ? 'I' : '-',
		cfg->caps.out ? 'O' : '-',
		cfg->caps.iso ? 'S' : '-',
		cfg->caps.bulk ? 'B' : '-',
		cfg->caps.interrupt ? 'I' : '-',
		cfg->caps.control ? 'C' : '-',
		cfg->caps.mps);

	if (dir_is_out && !cfg->caps.out) {
		return false;
	}

	if (dir_is_in && !cfg->caps.in) {
		return false;
	}

	if (USB_MPS_EP_SIZE(mps) > USB_MPS_EP_SIZE(cfg->caps.mps)) {
		return false;
	}

	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		if (!cfg->caps.bulk) {
			return false;
		}
		break;
	case USB_EP_TYPE_INTERRUPT:
		if (!cfg->caps.interrupt ||
		    (USB_MPS_ADDITIONAL_TRANSACTIONS(mps) &&
		     !cfg->caps.high_bandwidth)) {
			return false;
		}
		break;
	case USB_EP_TYPE_ISO:
		if (!cfg->caps.iso ||
		    (USB_MPS_ADDITIONAL_TRANSACTIONS(mps) &&
		     !cfg->caps.high_bandwidth)) {
			return false;
		}
		break;
	case USB_EP_TYPE_CONTROL:
		if (!cfg->caps.control) {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}

static void ep_update_mps(const struct device *dev,
			  const struct udc_ep_config *const cfg,
			  const uint8_t attributes,
			  uint16_t *const mps)
{
	struct udc_device_caps caps = udc_caps(dev);
	const uint16_t spec_int_mps = caps.hs ? 1024 : 64;
	const uint16_t spec_bulk_mps = caps.hs ? 512 : 64;

	/*
	 * TODO: This does not take into account the actual speed of
	 * the bus after the RESET. It should be fixed/improved when
	 * drivers for high-speed controllers are ported.
	 */
	switch (ep_attrib_get_transfer(attributes)) {
	case USB_EP_TYPE_BULK:
		*mps = MIN(cfg->caps.mps, spec_bulk_mps);
		break;
	case USB_EP_TYPE_INTERRUPT:
		*mps = MIN(cfg->caps.mps, spec_int_mps);
		break;
	case USB_EP_TYPE_CONTROL:
		__fallthrough;
	case USB_EP_TYPE_ISO:
		__fallthrough;
	default:
		return;
	}
}

int udc_ep_try_config(const struct device *dev,
		      const uint8_t ep,
		      const uint8_t attributes,
		      uint16_t *const mps,
		      const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	bool ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	api->lock(dev);

	ret = ep_check_config(dev, cfg, ep, attributes, *mps, interval);
	if (ret == true && *mps == 0U) {
		ep_update_mps(dev, cfg, attributes, mps);
	}

	api->unlock(dev);

	return (ret == false) ? -ENOTSUP : 0;
}
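
/*
 * Passing *mps == 0 lets the caller query the maximum packet size the
 * controller supports for a given transfer type: ep_check_config()
 * accepts the request, and ep_update_mps() then writes back a value
 * capped by the endpoint capabilities and the USB 2.0 limits for
 * full/high speed.
 */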

int udc_ep_enable_internal(const struct device *dev,
			   const uint8_t ep,
			   const uint8_t attributes,
			   const uint16_t mps,
			   const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already enabled", cfg->addr);
		return -EALREADY;
	}

	if (!ep_check_config(dev, cfg, ep, attributes, mps, interval)) {
		LOG_ERR("Endpoint 0x%02x validation failed", cfg->addr);
		return -ENODEV;
	}

	cfg->attributes = attributes;
	cfg->mps = mps;
	cfg->interval = interval;

	cfg->stat.odd = 0;
	cfg->stat.halted = 0;
	cfg->stat.data1 = false;
	ret = api->ep_enable(dev, cfg);
	cfg->stat.enabled = (ret == 0);

	return ret;
}

int udc_ep_enable(const struct device *dev,
		  const uint8_t ep,
		  const uint8_t attributes,
		  const uint16_t mps,
		  const uint8_t interval)
{
	const struct udc_api *api = dev->api;
	int ret;

	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
		return -EINVAL;
	}

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_enable_error;
	}

	ret = udc_ep_enable_internal(dev, ep, attributes, mps, interval);

ep_enable_error:
	api->unlock(dev);

	return ret;
}

int udc_ep_disable_internal(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		return -ENODEV;
	}

	if (!cfg->stat.enabled) {
		LOG_ERR("ep 0x%02x already disabled", cfg->addr);
		return -EALREADY;
	}

	ret = api->ep_disable(dev, cfg);
	if (ret == 0) {
		cfg->stat.enabled = false;
	}

	return ret;
}

int udc_ep_disable(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	int ret;

	if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
		return -EINVAL;
	}

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_disable_error;
	}

	ret = udc_ep_disable_internal(dev, ep);

ep_disable_error:
	api->unlock(dev);

	return ret;
}
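
/*
 * Note: udc_ep_enable()/udc_ep_disable() reject the default control
 * endpoints; controller drivers typically manage EP0 themselves
 * through the *_internal() variants, which skip the device state
 * checks and locking performed by the public wrappers.
 */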

int udc_ep_set_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_set_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_set_halt_error;
	}

	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_set_halt_error;
	}

	ret = api->ep_set_halt(dev, cfg);

ep_set_halt_error:
	api->unlock(dev);

	return ret;
}

int udc_ep_clear_halt(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_clear_halt_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_clear_halt_error;
	}

	if (ep_attrib_get_transfer(cfg->attributes) == USB_EP_TYPE_ISO) {
		ret = -ENOTSUP;
		goto ep_clear_halt_error;
	}

	ret = api->ep_clear_halt(dev, cfg);
	if (ret == 0) {
		cfg->stat.halted = false;
	}

ep_clear_halt_error:
	api->unlock(dev);

	return ret;
}
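
/*
 * Both halt operations return -ENOTSUP for isochronous endpoints,
 * which have no halt feature per the USB 2.0 specification.
 */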

static void udc_debug_ep_enqueue(const struct device *dev,
				 struct udc_ep_config *const cfg)
{
	struct net_buf *buf;
	sys_slist_t list;

	list.head = k_fifo_peek_head(&cfg->fifo);
	list.tail = k_fifo_peek_tail(&cfg->fifo);
	if (list.head == NULL) {
		LOG_DBG("ep 0x%02x queue is empty", cfg->addr);
		return;
	}

	LOG_DBG("[de]queue ep 0x%02x:", cfg->addr);

	SYS_SLIST_FOR_EACH_CONTAINER(&list, buf, node) {
		LOG_DBG("|-> %p (%u) ->", buf, buf->size);
	}
}

int udc_ep_enqueue(const struct device *dev, struct net_buf *const buf)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	struct udc_buf_info *bi;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	bi = udc_get_buf_info(buf);
	if (bi->ep == USB_CONTROL_EP_OUT) {
		ret = -EPERM;
		goto ep_enqueue_error;
	}

	cfg = udc_get_ep_cfg(dev, bi->ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	if (!cfg->stat.enabled) {
		ret = -ENODEV;
		goto ep_enqueue_error;
	}

	LOG_DBG("Queue ep 0x%02x %p len %u", cfg->addr, buf,
		USB_EP_DIR_IS_IN(cfg->addr) ? buf->len : buf->size);

	bi->setup = 0;
	ret = api->ep_enqueue(dev, cfg, buf);

ep_enqueue_error:
	api->unlock(dev);

	return ret;
}
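
/*
 * Sketch of a submission from class/stack context (illustrative
 * only; the endpoint address and payload are hypothetical): allocate
 * a buffer bound to the endpoint, fill it for IN transfers, and
 * queue it. The buffer is returned later through an
 * UDC_EVT_EP_REQUEST event:
 *
 *	struct net_buf *buf = udc_ep_buf_alloc(dev, 0x81, sizeof(payload));
 *
 *	if (buf != NULL) {
 *		net_buf_add_mem(buf, payload, sizeof(payload));
 *		err = udc_ep_enqueue(dev, buf);
 *	}
 */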

int udc_ep_dequeue(const struct device *dev, const uint8_t ep)
{
	const struct udc_api *api = dev->api;
	struct udc_ep_config *cfg;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto ep_dequeue_error;
	}

	cfg = udc_get_ep_cfg(dev, ep);
	if (cfg == NULL) {
		ret = -ENODEV;
		goto ep_dequeue_error;
	}

	if (cfg->stat.enabled || cfg->stat.halted) {
		LOG_INF("ep 0x%02x is not halted|disabled", cfg->addr);
	}

	if (UDC_COMMON_LOG_LEVEL == LOG_LEVEL_DBG) {
		udc_debug_ep_enqueue(dev, cfg);
	}

	if (k_fifo_is_empty(&cfg->fifo)) {
		ret = 0;
	} else {
		ret = api->ep_dequeue(dev, cfg);
	}

ep_dequeue_error:
	api->unlock(dev);

	return ret;
}

struct net_buf *udc_ep_buf_alloc(const struct device *dev,
				 const uint8_t ep,
				 const size_t size)
{
	const struct udc_api *api = dev->api;
	struct net_buf *buf = NULL;
	struct udc_buf_info *bi;

	api->lock(dev);

	buf = net_buf_alloc_len(&udc_ep_pool, size, K_NO_WAIT);
	if (!buf) {
		LOG_ERR("Failed to allocate net_buf %zu", size);
		goto ep_alloc_error;
	}

	bi = udc_get_buf_info(buf);
	bi->ep = ep;
	LOG_DBG("Allocate net_buf, ep 0x%02x, size %zu", ep, size);

ep_alloc_error:
	api->unlock(dev);

	return buf;
}

struct net_buf *udc_ctrl_alloc(const struct device *dev,
			       const uint8_t ep,
			       const size_t size)
{
	/* TODO: for now just pass through to udc_ep_buf_alloc() */
	return udc_ep_buf_alloc(dev, ep, size);
}

static inline void udc_buf_destroy(struct net_buf *buf)
{
	/* Adjust level and use together with the log in udc_ep_buf_alloc() */
	LOG_DBG("destroy %p", buf);
	net_buf_destroy(buf);
}

int udc_ep_buf_free(const struct device *dev, struct net_buf *const buf)
{
	const struct udc_api *api = dev->api;
	int ret = 0;

	api->lock(dev);
	net_buf_unref(buf);
	api->unlock(dev);

	return ret;
}

enum udc_bus_speed udc_device_speed(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	enum udc_bus_speed speed = UDC_BUS_UNKNOWN;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		goto device_speed_error;
	}

	if (api->device_speed) {
		speed = api->device_speed(dev);
	} else {
		/* TODO: Shall we track connected status in UDC? */
		speed = UDC_BUS_SPEED_FS;
	}

device_speed_error:
	api->unlock(dev);

	return speed;
}

int udc_enable(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!udc_is_initialized(dev)) {
		ret = -EPERM;
		goto udc_enable_error;
	}

	if (udc_is_enabled(dev)) {
		ret = -EALREADY;
		goto udc_enable_error;
	}

	data->stage = CTRL_PIPE_STAGE_SETUP;

	ret = api->enable(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UDC_STATUS_ENABLED);
	}

udc_enable_error:
	api->unlock(dev);

	return ret;
}

int udc_disable(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (!udc_is_enabled(dev)) {
		ret = -EALREADY;
		goto udc_disable_error;
	}

	ret = api->disable(dev);
	atomic_clear_bit(&data->status, UDC_STATUS_ENABLED);

udc_disable_error:
	api->unlock(dev);

	return ret;
}

int udc_init(const struct device *dev,
	     udc_event_cb_t event_cb, const void *const event_ctx)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	if (event_cb == NULL || event_ctx == NULL) {
		return -EINVAL;
	}

	api->lock(dev);

	if (udc_is_initialized(dev)) {
		ret = -EALREADY;
		goto udc_init_error;
	}

	data->event_cb = event_cb;
	data->event_ctx = event_ctx;

	ret = api->init(dev);
	if (ret == 0) {
		atomic_set_bit(&data->status, UDC_STATUS_INITIALIZED);
	}

udc_init_error:
	api->unlock(dev);

	return ret;
}

int udc_shutdown(const struct device *dev)
{
	const struct udc_api *api = dev->api;
	struct udc_data *data = dev->data;
	int ret;

	api->lock(dev);

	if (udc_is_enabled(dev)) {
		ret = -EBUSY;
		goto udc_shutdown_error;
	}

	if (!udc_is_initialized(dev)) {
		ret = -EALREADY;
		goto udc_shutdown_error;
	}

	ret = api->shutdown(dev);
	atomic_clear_bit(&data->status, UDC_STATUS_INITIALIZED);

udc_shutdown_error:
	api->unlock(dev);

	return ret;
}
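
/*
 * Controller lifecycle as implemented above (sketch):
 *
 *	udc_init(dev, event_cb, ctx);	sets UDC_STATUS_INITIALIZED
 *	udc_enable(dev);		sets UDC_STATUS_ENABLED and arms
 *					the control pipe setup stage
 *	...
 *	udc_disable(dev);		clears UDC_STATUS_ENABLED
 *	udc_shutdown(dev);		clears UDC_STATUS_INITIALIZED
 */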

static ALWAYS_INLINE
struct net_buf *udc_ctrl_alloc_stage(const struct device *dev,
				     struct net_buf *const parent,
				     const uint8_t ep,
				     const size_t size)
{
	struct net_buf *buf;

	buf = udc_ctrl_alloc(dev, ep, size);
	if (buf == NULL) {
		return NULL;
	}

	if (parent) {
		net_buf_frag_add(parent, buf);
	}

	return buf;
}

static struct net_buf *udc_ctrl_alloc_data(const struct device *dev,
					   struct net_buf *const setup,
					   const uint8_t ep)
{
	size_t size = udc_data_stage_length(setup);
	struct udc_buf_info *bi;
	struct net_buf *buf;

	buf = udc_ctrl_alloc_stage(dev, setup, ep, size);
	if (buf) {
		bi = udc_get_buf_info(buf);
		bi->data = true;
	}

	return buf;
}

static struct net_buf *udc_ctrl_alloc_status(const struct device *dev,
					     struct net_buf *const parent,
					     const uint8_t ep)
{
	size_t size = (ep == USB_CONTROL_EP_OUT) ? 64 : 0;
	struct udc_buf_info *bi;
	struct net_buf *buf;

	buf = udc_ctrl_alloc_stage(dev, parent, ep, size);
	if (buf) {
		bi = udc_get_buf_info(buf);
		bi->status = true;
	}

	return buf;
}

int udc_ctrl_submit_s_out_status(const struct device *dev,
				 struct net_buf *const dout)
{
	struct udc_buf_info *bi = udc_get_buf_info(dout);
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	bi->data = true;
	net_buf_frag_add(data->setup, dout);

	buf = udc_ctrl_alloc_status(dev, dout, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}

int udc_ctrl_submit_s_in_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	if (!udc_ctrl_stage_is_data_in(dev)) {
		return -ENOTSUP;
	}

	/* Allocate buffer for data stage IN */
	buf = udc_ctrl_alloc_data(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}

int udc_ctrl_submit_s_status(const struct device *dev)
{
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	int ret = 0;

	/* Allocate buffer for possible status IN */
	buf = udc_ctrl_alloc_status(dev, data->setup, USB_CONTROL_EP_IN);
	if (buf == NULL) {
		ret = -ENOMEM;
	}

	return udc_submit_ep_event(dev, data->setup, ret);
}

int udc_ctrl_submit_status(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);

	bi->status = true;

	return udc_submit_ep_event(dev, buf, 0);
}

bool udc_ctrl_stage_is_data_out(const struct device *dev)
{
	struct udc_data *data = dev->data;

	return data->stage == CTRL_PIPE_STAGE_DATA_OUT;
}

bool udc_ctrl_stage_is_data_in(const struct device *dev)
{
	struct udc_data *data = dev->data;

	return data->stage == CTRL_PIPE_STAGE_DATA_IN;
}

bool udc_ctrl_stage_is_status_out(const struct device *dev)
{
	struct udc_data *data = dev->data;

	return data->stage == CTRL_PIPE_STAGE_STATUS_OUT;
}

bool udc_ctrl_stage_is_status_in(const struct device *dev)
{
	struct udc_data *data = dev->data;

	return data->stage == CTRL_PIPE_STAGE_STATUS_IN;
}

bool udc_ctrl_stage_is_no_data(const struct device *dev)
{
	struct udc_data *data = dev->data;

	return data->stage == CTRL_PIPE_STAGE_NO_DATA;
}

static bool udc_data_stage_to_host(const struct net_buf *const buf)
{
	struct usb_setup_packet *setup = (void *)buf->data;

	return USB_REQTYPE_GET_DIR(setup->bmRequestType);
}
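
/*
 * Control pipe stage tracking: udc_ctrl_update_stage() below advances
 * data->stage through the sequences
 *
 *	setup -> (no data)	-> status IN
 *	setup -> data IN	-> status OUT (skipped with caps.out_ack)
 *	setup -> data OUT	-> status IN
 *
 * starting from CTRL_PIPE_STAGE_SETUP and falling back to
 * CTRL_PIPE_STAGE_ERROR when a completion does not match the expected
 * sequence.
 */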

void udc_ctrl_update_stage(const struct device *dev,
			   struct net_buf *const buf)
{
	struct udc_buf_info *bi = udc_get_buf_info(buf);
	struct udc_device_caps caps = udc_caps(dev);
	uint8_t next_stage = CTRL_PIPE_STAGE_ERROR;
	struct udc_data *data = dev->data;

	__ASSERT(USB_EP_GET_IDX(bi->ep) == 0,
		 "0x%02x is not a control endpoint", bi->ep);

	if (bi->setup && bi->ep == USB_CONTROL_EP_OUT) {
		uint16_t length = udc_data_stage_length(buf);

		data->setup = buf;

		if (data->stage != CTRL_PIPE_STAGE_SETUP) {
			LOG_INF("Sequence %u not completed", data->stage);
			data->stage = CTRL_PIPE_STAGE_SETUP;
		}

		/*
		 * Setup Stage has been completed (setup packet received);
		 * regardless of the previous stage, the sequence is now
		 * reset. The next state depends on wLength and the
		 * direction bit (D7).
		 */
		if (length == 0) {
			/*
			 * No Data Stage, next is Status Stage
			 * complete sequence: s->status
			 */
			LOG_DBG("s->(status)");
			next_stage = CTRL_PIPE_STAGE_NO_DATA;
		} else if (udc_data_stage_to_host(buf)) {
			/*
			 * Next is Data Stage (to host / IN)
			 * complete sequence: s->in->status
			 */
			LOG_DBG("s->(in)");
			next_stage = CTRL_PIPE_STAGE_DATA_IN;
		} else {
			/*
			 * Next is Data Stage (to device / OUT)
			 * complete sequence: s->out->status
			 */
			LOG_DBG("s->(out)");
			next_stage = CTRL_PIPE_STAGE_DATA_OUT;
		}

	} else if (bi->ep == USB_CONTROL_EP_OUT) {
		if (data->stage == CTRL_PIPE_STAGE_DATA_OUT) {
			/*
			 * Next is Status Stage if the request is okay
			 * (IN ZLP status to host).
			 */
			next_stage = CTRL_PIPE_STAGE_STATUS_IN;
		} else if (data->stage == CTRL_PIPE_STAGE_STATUS_OUT) {
			/*
			 * End of a sequence: s->in->status.
			 * The length must be checked here because an OUT
			 * request is always submitted with the minimum
			 * length of the control endpoint.
			 */
			if (buf->len == 0) {
				LOG_DBG("s-in-status");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_WRN("ZLP expected");
				next_stage = CTRL_PIPE_STAGE_ERROR;
			}
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}

	} else { /* if (bi->ep == USB_CONTROL_EP_IN) */
		if (data->stage == CTRL_PIPE_STAGE_STATUS_IN) {
			/*
			 * End of a sequence: s->out->status
			 */
			LOG_DBG("s-out-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else if (data->stage == CTRL_PIPE_STAGE_DATA_IN) {
			/*
			 * Data IN stage completed, next is Status Stage
			 * (OUT ZLP status to device). Controllers that
			 * acknowledge the status stage on their own
			 * (caps.out_ack) skip this state.
			 */
			if (caps.out_ack) {
				LOG_DBG("s-in->[status]");
				next_stage = CTRL_PIPE_STAGE_SETUP;
			} else {
				LOG_DBG("s-in->(status)");
				next_stage = CTRL_PIPE_STAGE_STATUS_OUT;
			}
		} else if (data->stage == CTRL_PIPE_STAGE_NO_DATA) {
			/*
			 * End of a sequence: s->status.
			 * The previous No Data stage was completed and
			 * confirmed with an IN ZLP.
			 */
			LOG_DBG("s-status");
			next_stage = CTRL_PIPE_STAGE_SETUP;
		} else {
			LOG_ERR("Cannot determine the next stage");
			next_stage = CTRL_PIPE_STAGE_ERROR;
		}
	}

	if (next_stage == data->stage) {
		LOG_WRN("State not changed!");
	}

	data->stage = next_stage;
}

#if defined(CONFIG_UDC_WORKQUEUE)
K_KERNEL_STACK_DEFINE(udc_work_q_stack, CONFIG_UDC_WORKQUEUE_STACK_SIZE);

struct k_work_q udc_work_q;

static int udc_work_q_init(void)
{
	k_work_queue_start(&udc_work_q,
			   udc_work_q_stack,
			   K_KERNEL_STACK_SIZEOF(udc_work_q_stack),
			   CONFIG_UDC_WORKQUEUE_PRIORITY, NULL);
	k_thread_name_set(&udc_work_q.thread, "udc_work_q");

	return 0;
}

SYS_INIT(udc_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif
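
/*
 * When CONFIG_UDC_WORKQUEUE is enabled, drivers can defer work to the
 * dedicated queue instead of the system workqueue, e.g. (sketch, with
 * a hypothetical work item in the driver data):
 *
 *	k_work_submit_to_queue(&udc_work_q, &priv->work);
 */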