/*
 * Copyright 2024 Ambiq Micro Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <soc.h>
#include "am_mcu_apollo.h"
#include <string.h>
#include <zephyr/drivers/clock_control/clock_control_ambiq.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/usb/udc.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/cache.h>
#include <zephyr/logging/log.h>
#include <zephyr/irq.h>
#include "udc_common.h"
#include <usb_dwc2_hw.h>

LOG_MODULE_REGISTER(udc_ambiq, CONFIG_UDC_DRIVER_LOG_LEVEL);

enum udc_ambiq_event_type {
	/* SETUP packet received at Control Endpoint */
	UDC_AMBIQ_EVT_HAL_SETUP,
	/* OUT transaction completed */
	UDC_AMBIQ_EVT_HAL_OUT_CMP,
	/* IN transaction completed */
	UDC_AMBIQ_EVT_HAL_IN_CMP,
	/* Xfer request received via udc_ambiq_ep_enqueue API */
	UDC_AMBIQ_EVT_XFER,
};

struct udc_ambiq_event {
	const struct device *dev;
	enum udc_ambiq_event_type type;
	uint8_t ep;
};
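
/*
 * Events posted from the HAL callbacks and from the enqueue path; they are
 * drained by the per-instance driver thread (see ambiq_thread_handler).
 */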
K_MSGQ_DEFINE(drv_msgq, sizeof(struct udc_ambiq_event), CONFIG_UDC_AMBIQ_MAX_QMESSAGES,
	      sizeof(void *));

/* USB device controller access from devicetree */
#define DT_DRV_COMPAT ambiq_usb

#define EP0_MPS   64U
#define EP_FS_MPS 64U
#define EP_HS_MPS 512U
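
/*
 * Driver private data. The ctrl_* flags track control-transfer corner cases:
 * a SETUP packet that arrives while the status stage of a control OUT
 * transfer is still pending is stored in ctrl_pending_setup_buffer and
 * replayed once the deferred IN acknowledge completes.
 */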
struct udc_ambiq_data {
	struct k_thread thread_data;
	void *usb_handle;
	am_hal_usb_dev_speed_e usb_speed;
	uint8_t setup[8];
	uint8_t ctrl_pending_setup_buffer[8];
	bool ctrl_pending_in_ack;
	bool ctrl_pending_setup;
	bool ctrl_setup_recv_at_status_in;
};

struct udc_ambiq_config {
	uint32_t num_endpoints;
	int speed_idx;
	struct udc_ep_config *ep_cfg_in;
	struct udc_ep_config *ep_cfg_out;
	struct gpio_dt_spec vddusb33_gpio;
	struct gpio_dt_spec vddusb0p9_gpio;
	void (*make_thread)(const struct device *dev);
	void (*irq_enable_func)(const struct device *dev);
	void (*irq_disable_func)(const struct device *dev);
	void (*callback_register_func)(const struct device *dev);
};

static int udc_ambiq_rx(const struct device *dev, uint8_t ep, struct net_buf *buf);
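
/*
 * Allocate a buffer for the control OUT endpoint, queue it on the endpoint
 * FIFO and, for a non-zero length, start reception right away.
 */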
static int usbd_ctrl_feed_dout(const struct device *dev, const size_t length)
{
	struct udc_ep_config *cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
	struct net_buf *buf;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
	if (buf == NULL) {
		return -ENOMEM;
	}

	k_fifo_put(&cfg->fifo, buf);
	if (length) {
		udc_ambiq_rx(dev, cfg->addr, buf);
	}

	return 0;
}
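
/* Start an IN transfer on the endpoint; a NULL buf requests a zero-length packet. */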
static int udc_ambiq_tx(const struct device *dev, uint8_t ep, struct net_buf *buf)
{
	const struct udc_ambiq_data *priv = udc_get_private(dev);
	uint32_t status;

	if (udc_ep_is_busy(dev, ep)) {
		LOG_WRN("ep 0x%02x is busy!", ep);
		return 0;
	}
	udc_ep_set_busy(dev, ep, true);

	/* A NULL buf indicates a zero-length packet (ZLP) request */
	if (buf == NULL) {
		status = am_hal_usb_ep_xfer(priv->usb_handle, ep, NULL, 0);
	} else {
		status = am_hal_usb_ep_xfer(priv->usb_handle, ep, buf->data, buf->len);
	}

	if (status != AM_HAL_STATUS_SUCCESS) {
		udc_ep_set_busy(dev, ep, false);
		LOG_ERR("am_hal_usb_ep_xfer write failed(0x%02x), %d", ep, (int)status);
		return -EIO;
	}

	return 0;
}
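
/*
 * Arm an OUT transfer on the endpoint. For non-control endpoints the
 * requested size is clamped to the endpoint MPS so a single HAL transfer
 * never exceeds one packet.
 */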
static int udc_ambiq_rx(const struct device *dev, uint8_t ep, struct net_buf *buf)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	struct udc_ep_config *cfg = udc_get_ep_cfg(dev, ep);
	uint32_t status;
	uint16_t rx_size = buf->size;

	if (udc_ep_is_busy(dev, ep)) {
		LOG_WRN("ep 0x%02x is busy!", ep);
		return 0;
	}
	udc_ep_set_busy(dev, ep, true);

	/* Make sure the OUT transaction size doesn't exceed the endpoint's MPS */
	if ((ep != USB_CONTROL_EP_OUT) && (cfg->mps < rx_size)) {
		rx_size = cfg->mps;
	}

	status = am_hal_usb_ep_xfer(priv->usb_handle, ep, buf->data, rx_size);
	if (status != AM_HAL_STATUS_SUCCESS) {
		udc_ep_set_busy(dev, ep, false);
		LOG_ERR("am_hal_usb_ep_xfer read(rx) failed(0x%02x), %d", ep, (int)status);
		return -EIO;
	}

	return 0;
}
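
/* Translate HAL device events (reset, resume, SOF, suspend) into UDC events. */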
static void udc_ambiq_evt_callback(const struct device *dev, am_hal_usb_dev_event_e dev_state)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	switch (dev_state) {
	case AM_HAL_USB_DEV_EVT_BUS_RESET:
		/* Enable USB bus interrupts */
		am_hal_usb_intr_usb_enable(priv->usb_handle,
					   USB_CFG2_SOFE_Msk | USB_CFG2_ResumeE_Msk |
					   USB_CFG2_SuspendE_Msk | USB_CFG2_ResetE_Msk);
		/* Initialize the control endpoint */
		am_hal_usb_ep_init(priv->usb_handle, 0, 0, EP0_MPS);
		/* Set USB device speed to HAL */
		am_hal_usb_set_dev_speed(priv->usb_handle, priv->usb_speed);
		LOG_INF("USB Reset event");
		/* Submit USB reset event to UDC */
		udc_submit_event(dev, UDC_EVT_RESET, 0);
		break;
	case AM_HAL_USB_DEV_EVT_RESUME:
		/* Handle USB Resume event, then set device state to active */
		am_hal_usb_set_dev_state(priv->usb_handle, AM_HAL_USB_DEV_STATE_ACTIVE);
		LOG_INF("RESUMING from suspend");
		udc_set_suspended(dev, false);
		udc_submit_event(dev, UDC_EVT_RESUME, 0);
		break;
	case AM_HAL_USB_DEV_EVT_SOF:
		udc_submit_event(dev, UDC_EVT_SOF, 0);
		break;
	case AM_HAL_USB_DEV_EVT_SUSPEND:
		/* Handle USB Suspend event, then set device state to suspended */
		am_hal_usb_set_dev_state(priv->usb_handle, AM_HAL_USB_DEV_STATE_SUSPENDED);
		udc_set_suspended(dev, true);
		udc_submit_event(dev, UDC_EVT_SUSPEND, 0);
		break;
	default:
		/* Unreachable case */
		break;
	}
}

static void udc_ambiq_ep0_setup_callback(const struct device *dev, uint8_t *usb_setup)
{
	struct udc_ambiq_event evt = {.type = UDC_AMBIQ_EVT_HAL_SETUP};
	struct udc_ambiq_data *priv = udc_get_private(dev);

	/* Defer a SETUP packet that arrives while we are waiting for the status
	 * stage of an OUT data control transfer to complete
	 */
	if (priv->ctrl_pending_in_ack) {
		priv->ctrl_pending_setup = true;
		memcpy(priv->ctrl_pending_setup_buffer, usb_setup, 8);
		return;
	}

	/* Check whether the SETUP packet was received during the OUT_ACK
	 * (a.k.a. STATUS_IN) state. If so, it might be an ordering inversion
	 * caused by the register read sequence. Raise a flag and handle it later.
	 */
	priv->ctrl_setup_recv_at_status_in = udc_ctrl_stage_is_status_in(dev);
	memcpy(priv->setup, usb_setup, sizeof(struct usb_setup_packet));
	k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
}
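
/*
 * HAL transfer-completion callback. For OUT endpoints the received length is
 * committed to the queued buffer here; further handling is deferred to the
 * driver thread through the message queue.
 */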
static void udc_ambiq_ep_xfer_complete_callback(const struct device *dev, uint8_t ep_addr,
						uint16_t xfer_len, am_hal_usb_xfer_code_e code,
						void *param)
{
	struct net_buf *buf;
	struct udc_ambiq_event evt;

	/* Extract EP information and queue event */
	evt.ep = ep_addr;
	if (USB_EP_DIR_IS_IN(ep_addr)) {
		evt.type = UDC_AMBIQ_EVT_HAL_IN_CMP;
	} else {
		buf = udc_buf_peek(dev, ep_addr);
		if (buf == NULL) {
			LOG_ERR("No buffer for ep 0x%02x", ep_addr);
			udc_submit_event(dev, UDC_EVT_ERROR, -ENOBUFS);
			return;
		}

		net_buf_add(buf, xfer_len);
		evt.type = UDC_AMBIQ_EVT_HAL_OUT_CMP;
	}

	k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
}

static enum udc_bus_speed udc_ambiq_device_speed(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	am_hal_usb_dev_speed_e e_speed = am_hal_get_usb_dev_speed(priv->usb_handle);

	if (e_speed == AM_HAL_USB_SPEED_HIGH) {
		return UDC_BUS_SPEED_HS;
	} else {
		return UDC_BUS_SPEED_FS;
	}
}

static int udc_ambiq_ep_enqueue(const struct device *dev, struct udc_ep_config *ep_cfg,
				struct net_buf *buf)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	struct udc_ambiq_event evt = {
		.ep = ep_cfg->addr,
		.type = UDC_AMBIQ_EVT_XFER,
	};

	LOG_DBG("%p enqueue %x %p", dev, ep_cfg->addr, buf);
	udc_buf_put(ep_cfg, buf);
	if (ep_cfg->addr == USB_CONTROL_EP_IN && buf->len == 0 && priv->ctrl_pending_in_ack) {
		priv->ctrl_pending_in_ack = false;
		udc_ambiq_ep_xfer_complete_callback(dev, USB_CONTROL_EP_IN, 0, 0, NULL);
		return 0;
	}

	if (!ep_cfg->stat.halted) {
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
	}

	return 0;
}

static int udc_ambiq_ep_dequeue(const struct device *dev, struct udc_ep_config *ep_cfg)
{
	unsigned int lock_key;
	struct udc_ambiq_data *priv = udc_get_private(dev);
	struct net_buf *buf;

	lock_key = irq_lock();

	buf = udc_buf_get_all(dev, ep_cfg->addr);
	if (buf) {
		udc_submit_ep_event(dev, buf, -ECONNABORTED);
	}

	udc_ep_set_busy(dev, ep_cfg->addr, false);
	am_hal_usb_ep_state_reset(priv->usb_handle, ep_cfg->addr);
	irq_unlock(lock_key);

	LOG_DBG("dequeue ep 0x%02x", ep_cfg->addr);

	return 0;
}

static int udc_ambiq_ep_set_halt(const struct device *dev, struct udc_ep_config *ep_cfg)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	LOG_DBG("Halt ep 0x%02x", ep_cfg->addr);

	am_hal_usb_ep_stall(priv->usb_handle, ep_cfg->addr);
	if (USB_EP_GET_IDX(ep_cfg->addr)) {
		ep_cfg->stat.halted = true;
	}

	return 0;
}

static int udc_ambiq_ep_clear_halt(const struct device *dev, struct udc_ep_config *ep_cfg)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	LOG_DBG("Clear halt ep 0x%02x", ep_cfg->addr);

	am_hal_usb_ep_clear_stall(priv->usb_handle, ep_cfg->addr);

	ep_cfg->stat.halted = false;

	/* Resume queued transfer if any */
	if (udc_buf_peek(dev, ep_cfg->addr)) {
		struct udc_ambiq_event evt = {
			.ep = ep_cfg->addr,
			.type = UDC_AMBIQ_EVT_XFER,
		};
		k_msgq_put(&drv_msgq, &evt, K_NO_WAIT);
	}

	return 0;
}

static int udc_ambiq_ep_enable(const struct device *dev, struct udc_ep_config *ep_cfg)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	uint8_t endpoint_type;
	uint32_t status;

	__ASSERT_NO_MSG(ep_cfg);

	switch (ep_cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) {
	case USB_EP_TYPE_CONTROL:
		endpoint_type = 0; /* AM_HAL_USB_EP_XFER_CONTROL */
		break;
	case USB_EP_TYPE_ISO:
		endpoint_type = 1; /* AM_HAL_USB_EP_XFER_ISOCHRONOUS */
		break;
	case USB_EP_TYPE_BULK:
		endpoint_type = 2; /* AM_HAL_USB_EP_XFER_BULK */
		break;
	case USB_EP_TYPE_INTERRUPT:
		endpoint_type = 3; /* AM_HAL_USB_EP_XFER_INTERRUPT */
		break;
	default:
		return -EINVAL;
	}

	status = am_hal_usb_ep_init(priv->usb_handle, ep_cfg->addr, endpoint_type, ep_cfg->mps);
	if (status != AM_HAL_STATUS_SUCCESS) {
		LOG_ERR("am_hal_usb_ep_init failed(0x%02x), %d", ep_cfg->addr, (int)status);
		return -EIO;
	}

	LOG_DBG("Enable ep 0x%02x", ep_cfg->addr);

	return 0;
}

static int udc_ambiq_ep_disable(const struct device *dev, struct udc_ep_config *ep_cfg)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	__ASSERT_NO_MSG(ep_cfg);
	am_hal_usb_ep_state_reset(priv->usb_handle, ep_cfg->addr);
	LOG_DBG("Disable ep 0x%02x", ep_cfg->addr);

	return 0;
}

static int udc_ambiq_host_wakeup(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	am_hal_usb_start_remote_wakeup(priv->usb_handle);

	return 0;
}

static int udc_ambiq_set_address(const struct device *dev, const uint8_t addr)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	LOG_DBG("addr %u (0x%02x)", addr, addr);
	am_hal_usb_set_addr(priv->usb_handle, addr);
	am_hal_usb_set_dev_state(priv->usb_handle, AM_HAL_USB_DEV_STATE_ADDRESSED);

	return 0;
}

static int udc_ambiq_test_mode(const struct device *dev, const uint8_t mode, const bool dryrun)
{
	am_hal_usb_test_mode_e am_usb_test_mode;
	struct udc_ambiq_data *priv = udc_get_private(dev);

	switch (mode) {
	case USB_DWC2_DCTL_TSTCTL_TESTJ:
		am_usb_test_mode = AM_HAL_USB_TEST_J;
		break;
	case USB_DWC2_DCTL_TSTCTL_TESTK:
		am_usb_test_mode = AM_HAL_USB_TEST_K;
		break;
	case USB_DWC2_DCTL_TSTCTL_TESTSN:
		am_usb_test_mode = AM_HAL_USB_TEST_SE0_NAK;
		break;
	case USB_DWC2_DCTL_TSTCTL_TESTPM:
		am_usb_test_mode = AM_HAL_USB_TEST_PACKET;
		break;
	default:
		return -EINVAL;
	}

	if (dryrun) {
		LOG_DBG("Test Mode %u supported", mode);
		return 0;
	}

	am_hal_usb_enter_test_mode(priv->usb_handle);
	am_hal_usb_test_mode(priv->usb_handle, am_usb_test_mode);

	return 0;
}

static int udc_ambiq_enable(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);

	/* USB soft connect */
	am_hal_usb_attach(priv->usb_handle);
	LOG_DBG("Enable UDC");

	return 0;
}

static int udc_ambiq_disable(const struct device *dev)
{
	unsigned int lock_key;
	struct udc_ambiq_data *priv = udc_get_private(dev);
	const struct udc_ambiq_config *cfg = dev->config;

	/* Disable USB interrupt */
	lock_key = irq_lock();
	cfg->irq_disable_func(dev);
	irq_unlock(lock_key);

	/* USB soft disconnect */
	am_hal_usb_detach(priv->usb_handle);
	am_hal_usb_intr_usb_disable(priv->usb_handle,
				    USB_CFG2_SOFE_Msk | USB_CFG2_ResumeE_Msk |
				    USB_CFG2_SuspendE_Msk | USB_CFG2_ResetE_Msk);
	am_hal_usb_intr_usb_clear(priv->usb_handle);
	for (unsigned int i = 0; i < cfg->num_endpoints; i++) {
		am_hal_usb_ep_state_reset(priv->usb_handle, i);
		am_hal_usb_ep_state_reset(priv->usb_handle, USB_EP_DIR_IN | i);
	}
	LOG_DBG("Disable UDC");

	return 0;
}

static void udc_ambiq_usb_isr(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	uint32_t int_status[3];

	am_hal_usb_intr_status_get(priv->usb_handle, &int_status[0], &int_status[1],
				   &int_status[2]);
	am_hal_usb_interrupt_service(priv->usb_handle, int_status[0], int_status[1], int_status[2]);
}
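
/*
 * Drive the external VDDUSB33/VDDUSB0P9 rail-control GPIOs, when both are
 * described in devicetree, and wait for the rails to settle.
 */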
static int usb_power_rails_set(const struct device *dev, bool on)
{
	int ret = 0;
	const struct udc_ambiq_config *cfg = dev->config;

	/* Check that both power-control GPIOs are defined */
	if ((cfg->vddusb33_gpio.port == NULL) || (cfg->vddusb0p9_gpio.port == NULL)) {
		LOG_WRN("vddusb control gpio not defined");
		return -EINVAL;
	}

	/* Configure the USB rail control GPIOs as outputs */
	ret = gpio_pin_configure_dt(&cfg->vddusb33_gpio, GPIO_OUTPUT);
	if (ret) {
		return ret;
	}

	ret = gpio_pin_configure_dt(&cfg->vddusb0p9_gpio, GPIO_OUTPUT);
	if (ret) {
		return ret;
	}

	/* Set the power rails to the requested state */
	ret = gpio_pin_set_dt(&cfg->vddusb33_gpio, on);
	if (ret) {
		return ret;
	}
	ret = gpio_pin_set_dt(&cfg->vddusb0p9_gpio, on);
	if (ret) {
		return ret;
	}
	am_hal_delay_us(50000);

	return 0;
}

static int udc_ambiq_init(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	const struct udc_ambiq_config *cfg = dev->config;
	int ret = 0;

	/* Create USB */
	if (am_hal_usb_initialize(0, (void *)&priv->usb_handle) != AM_HAL_STATUS_SUCCESS) {
		return -EIO;
	}

	/* Register callback functions */
	cfg->callback_register_func(dev);
	/* Enable internal power rail */
	am_hal_usb_power_control(priv->usb_handle, AM_HAL_SYSCTRL_WAKE, false);
	/* Assert USB PHY reset in MCU control registers */
	am_hal_usb_enable_phy_reset_override();
	/* Enable the USB power rails */
	ret = usb_power_rails_set(dev, true);
	if (ret) {
		return ret;
	}
	/* Disable BC detection voltage source */
	am_hal_usb_hardware_unreset();
	/* Release USB PHY reset */
	am_hal_usb_disable_phy_reset_override();
	/* Set USB Speed */
	am_hal_usb_set_dev_speed(priv->usb_handle, priv->usb_speed);
	/* Enable USB interrupt */
	am_hal_usb_intr_usb_enable(priv->usb_handle, USB_INTRUSB_Reset_Msk);
	/* Enable Control Endpoints */
	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, EP0_MPS, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}
	if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, EP0_MPS, 0)) {
		LOG_ERR("Failed to enable control endpoint");
		return -EIO;
	}
	/* Connect and enable USB interrupt */
	cfg->irq_enable_func(dev);

	return 0;
}

static int udc_ambiq_shutdown(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	const struct udc_ambiq_config *cfg = dev->config;
	int ret = 0;

	LOG_INF("shutdown");

	/* Disable Control Endpoints */
	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}
	if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
		LOG_ERR("Failed to disable control endpoint");
		return -EIO;
	}
	/* Disable USB interrupt */
	cfg->irq_disable_func(dev);
	/* Assert USB PHY reset */
	am_hal_usb_enable_phy_reset_override();
	/* Disable the USB power rails */
	ret = usb_power_rails_set(dev, false);
	if (ret) {
		return ret;
	}
	/* Power down USB HAL */
	am_hal_usb_power_control(priv->usb_handle, AM_HAL_SYSCTRL_DEEPSLEEP, false);
	/* Deinitialize USB instance */
	am_hal_usb_deinitialize(priv->usb_handle);
	priv->usb_handle = NULL;

	return 0;
}

static int udc_ambiq_lock(const struct device *dev)
{
	return udc_lock_internal(dev, K_FOREVER);
}

static int udc_ambiq_unlock(const struct device *dev)
{
	return udc_unlock_internal(dev);
}
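
/*
 * Thread-context handling of a SETUP packet: wrap it in a net_buf, advance
 * the control stage, then either feed a buffer for the data OUT stage or
 * notify the stack for the IN/no-data case.
 */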
static void ambiq_handle_evt_setup(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	struct net_buf *buf;
	int err;

	/* Create network buffer for SETUP packet and pass into UDC framework */
	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet));
	if (buf == NULL) {
		LOG_ERR("Failed to allocate for setup");
		return;
	}
	net_buf_add_mem(buf, priv->setup, sizeof(priv->setup));
	udc_ep_buf_set_setup(buf);
	LOG_HEXDUMP_DBG(buf->data, buf->len, "setup");

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);
		err = usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		priv->ctrl_pending_in_ack = true;
		if (err == -ENOMEM) {
			udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		/* Submit event for data IN stage */
		LOG_DBG("s:%p|feed for -in-status", buf);
		udc_ctrl_submit_s_in_status(dev);
	} else {
		/* Submit event for no-data stage */
		LOG_DBG("s:%p|feed >setup", buf);
		udc_ctrl_submit_s_status(dev);
	}
}

static inline void ambiq_handle_evt_dout(const struct device *dev, struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	/* retrieve endpoint buffer */
	buf = udc_buf_get(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for ep 0x%02x", cfg->addr);
		return;
	}

	/* Clear endpoint busy status */
	udc_ep_set_busy(dev, cfg->addr, false);

	/* Handle transfer complete event */
	if (cfg->addr == USB_CONTROL_EP_OUT) {
		if (udc_ctrl_stage_is_status_out(dev)) {
			udc_ctrl_update_stage(dev, buf);
			udc_ctrl_submit_status(dev, buf);
		} else {
			udc_ctrl_update_stage(dev, buf);
		}

		if (udc_ctrl_stage_is_status_in(dev)) {
			udc_ctrl_submit_s_out_status(dev, buf);
		}
	} else {
		udc_submit_ep_event(dev, buf, 0);
	}
}

static void ambiq_handle_zlp_tx(const struct device *dev, struct udc_ep_config *const cfg)
{
	udc_ambiq_tx(dev, cfg->addr, NULL);
}
static void ambiq_handle_evt_din(const struct device *dev, struct udc_ep_config *const cfg)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	struct udc_data *data = dev->data;
	struct net_buf *buf;
	bool udc_ambiq_rx_status_in_completed = false;

	/* Clear endpoint busy status */
	udc_ep_set_busy(dev, cfg->addr, false);
	/* Check and Handle ZLP flag */
	buf = udc_buf_peek(dev, cfg->addr);
	if (cfg->addr != USB_CONTROL_EP_IN) {
		if (udc_ep_buf_has_zlp(buf)) {
			udc_ep_buf_clear_zlp(buf);
			ambiq_handle_zlp_tx(dev, cfg);
			return;
		}
	}

	/* retrieve endpoint buffer */
	buf = udc_buf_get(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer queued for ep 0x%02x", cfg->addr);
		return;
	}
	LOG_DBG("DataIn ep 0x%02x len %u", cfg->addr, buf->len);

	/* Handle transfer complete event */
	if (cfg->addr == USB_CONTROL_EP_IN) {
		if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) {
			if (data->caps.out_ack == 0) {
				/* Status stage finished, notify upper layer */
				udc_ctrl_submit_status(dev, buf);
			}

			if (udc_ctrl_stage_is_status_in(dev)) {
				udc_ambiq_rx_status_in_completed = true;
			}
		}

		if (priv->ctrl_setup_recv_at_status_in && (buf->len == 0)) {
			priv->ctrl_setup_recv_at_status_in = false;
			net_buf_unref(buf);
			return;
		}
		priv->ctrl_setup_recv_at_status_in = false;
		/* Update to next stage of control transfer */
		udc_ctrl_update_stage(dev, buf);

		if (((data->caps.out_ack == false) && udc_ctrl_stage_is_status_out(dev)) ||
		    ((data->caps.out_ack == true) && (data->stage == CTRL_PIPE_STAGE_SETUP))) {
			/*
			 * IN transfer finished, release buffer,
			 * control OUT buffer should be already fed.
			 */
			net_buf_unref(buf);
		}

		/*
		 * Trigger a deferred SETUP that was held back while waiting
		 * for the DATA_OUT status stage to complete
		 */
		if (udc_ambiq_rx_status_in_completed && priv->ctrl_pending_setup) {
			priv->ctrl_pending_setup = false;
			udc_ambiq_ep0_setup_callback(dev, priv->ctrl_pending_setup_buffer);
		}
	} else {
		udc_submit_ep_event(dev, buf, 0);
	}
}
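
/* Start the next queued transfer on the endpoint in the appropriate direction. */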
static void udc_event_xfer(const struct device *dev, struct udc_ep_config *const cfg)
{
	struct net_buf *buf;

	buf = udc_buf_peek(dev, cfg->addr);
	if (buf == NULL) {
		LOG_ERR("No buffer for ep 0x%02x", cfg->addr);
		return;
	}

	if (USB_EP_DIR_IS_IN(cfg->addr)) {
		udc_ambiq_tx(dev, cfg->addr, buf);
	} else {
		udc_ambiq_rx(dev, cfg->addr, buf);
	}
}
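
/* Driver thread: drain the event queue and dispatch to the handlers above. */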
static ALWAYS_INLINE void ambiq_thread_handler(void *const arg)
{
	const struct device *dev = (const struct device *)arg;
	struct udc_ep_config *ep_cfg;
	struct udc_ambiq_event evt;

	while (true) {
		k_msgq_get(&drv_msgq, &evt, K_FOREVER);
		ep_cfg = udc_get_ep_cfg(dev, evt.ep);

		switch (evt.type) {
		case UDC_AMBIQ_EVT_XFER:
			udc_event_xfer(dev, ep_cfg);
			break;
		case UDC_AMBIQ_EVT_HAL_SETUP:
			LOG_DBG("SETUP event");
			ambiq_handle_evt_setup(dev);
			break;
		case UDC_AMBIQ_EVT_HAL_OUT_CMP:
			LOG_DBG("DOUT event ep 0x%02x", ep_cfg->addr);
			ambiq_handle_evt_dout(dev, ep_cfg);
			break;
		case UDC_AMBIQ_EVT_HAL_IN_CMP:
			LOG_DBG("DIN event");
			ambiq_handle_evt_din(dev, ep_cfg);
			break;
		default:
			__ASSERT_NO_MSG(false);
			break;
		}
	}
}

/*
 * This is called once to initialize the controller and endpoints
 * capabilities, and register endpoint structures.
 */
static int udc_ambiq_driver_init(const struct device *dev)
{
	struct udc_ambiq_data *priv = udc_get_private(dev);
	const struct udc_ambiq_config *cfg = dev->config;
	struct udc_data *data = dev->data;
	int ep_mps = 0;
	int err;

	if (cfg->speed_idx == 1) {
		data->caps.hs = false;
		priv->usb_speed = AM_HAL_USB_SPEED_FULL;
		ep_mps = EP_FS_MPS;
	} else if (cfg->speed_idx == 2) {
		data->caps.hs = true;
		priv->usb_speed = AM_HAL_USB_SPEED_HIGH;
		ep_mps = EP_HS_MPS;
	}

	for (unsigned int i = 0; i < cfg->num_endpoints; i++) {
		cfg->ep_cfg_out[i].caps.out = 1;
		if (i == 0) {
			cfg->ep_cfg_out[i].caps.control = 1;
			cfg->ep_cfg_out[i].caps.mps = EP0_MPS;
		} else {
			cfg->ep_cfg_out[i].caps.bulk = 1;
			cfg->ep_cfg_out[i].caps.interrupt = 1;
			cfg->ep_cfg_out[i].caps.iso = 1;
			cfg->ep_cfg_out[i].caps.mps = ep_mps;
		}

		cfg->ep_cfg_out[i].addr = USB_EP_DIR_OUT | i;
		err = udc_register_ep(dev, &cfg->ep_cfg_out[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}

	for (unsigned int i = 0; i < cfg->num_endpoints; i++) {
		cfg->ep_cfg_in[i].caps.in = 1;
		if (i == 0) {
			cfg->ep_cfg_in[i].caps.control = 1;
			cfg->ep_cfg_in[i].caps.mps = EP0_MPS;
		} else {
			cfg->ep_cfg_in[i].caps.bulk = 1;
			cfg->ep_cfg_in[i].caps.interrupt = 1;
			cfg->ep_cfg_in[i].caps.iso = 1;
			cfg->ep_cfg_in[i].caps.mps = ep_mps;
		}

		cfg->ep_cfg_in[i].addr = USB_EP_DIR_IN | i;
		err = udc_register_ep(dev, &cfg->ep_cfg_in[i]);
		if (err != 0) {
			LOG_ERR("Failed to register endpoint");
			return err;
		}
	}
	data->caps.addr_before_status = true;
	data->caps.rwup = true;
	data->caps.out_ack = true;
	data->caps.mps0 = UDC_MPS0_64;

	cfg->make_thread(dev);

	return 0;
}

static const struct udc_api udc_ambiq_api = {
	.device_speed = udc_ambiq_device_speed,
	.ep_enqueue = udc_ambiq_ep_enqueue,
	.ep_dequeue = udc_ambiq_ep_dequeue,
	.ep_set_halt = udc_ambiq_ep_set_halt,
	.ep_clear_halt = udc_ambiq_ep_clear_halt,
	.ep_try_config = NULL,
	.ep_enable = udc_ambiq_ep_enable,
	.ep_disable = udc_ambiq_ep_disable,
	.host_wakeup = udc_ambiq_host_wakeup,
	.set_address = udc_ambiq_set_address,
	.test_mode = udc_ambiq_test_mode,
	.enable = udc_ambiq_enable,
	.disable = udc_ambiq_disable,
	.init = udc_ambiq_init,
	.shutdown = udc_ambiq_shutdown,
	.lock = udc_ambiq_lock,
	.unlock = udc_ambiq_unlock,
};

/*
 * A UDC driver should always be implemented as a multi-instance
 * driver, even if your platform does not require it.
 */
#define UDC_AMBIQ_DEVICE_DEFINE(n)                                                                 \
	K_THREAD_STACK_DEFINE(udc_ambiq_stack_##n, CONFIG_UDC_AMBIQ_STACK_SIZE);                   \
                                                                                                   \
	static void udc_ambiq_evt_callback_##n(am_hal_usb_dev_event_e dev_state)                   \
	{                                                                                          \
		udc_ambiq_evt_callback(DEVICE_DT_INST_GET(n), dev_state);                          \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_ep0_setup_callback_##n(uint8_t *usb_setup)                           \
	{                                                                                          \
		udc_ambiq_ep0_setup_callback(DEVICE_DT_INST_GET(n), usb_setup);                    \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_ep_xfer_complete_callback_##n(                                       \
		uint8_t ep_addr, uint16_t xfer_len, am_hal_usb_xfer_code_e code, void *param)      \
	{                                                                                          \
		udc_ambiq_ep_xfer_complete_callback(DEVICE_DT_INST_GET(n), ep_addr, xfer_len,      \
						    code, param);                                  \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_register_callback_##n(const struct device *dev)                      \
	{                                                                                          \
		struct udc_ambiq_data *priv = udc_get_private(dev);                                \
                                                                                                   \
		am_hal_usb_register_dev_evt_callback(priv->usb_handle,                             \
						     udc_ambiq_evt_callback_##n);                  \
		am_hal_usb_register_ep0_setup_received_callback(priv->usb_handle,                  \
								udc_ambiq_ep0_setup_callback_##n); \
		am_hal_usb_register_ep_xfer_complete_callback(                                     \
			priv->usb_handle, udc_ambiq_ep_xfer_complete_callback_##n);                \
	}                                                                                          \
	static void udc_ambiq_thread_##n(void *dev, void *arg1, void *arg2)                        \
	{                                                                                          \
		ambiq_thread_handler(dev);                                                         \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_make_thread_##n(const struct device *dev)                            \
	{                                                                                          \
		struct udc_ambiq_data *priv = udc_get_private(dev);                                \
                                                                                                   \
		k_thread_create(&priv->thread_data, udc_ambiq_stack_##n,                           \
				K_THREAD_STACK_SIZEOF(udc_ambiq_stack_##n), udc_ambiq_thread_##n,  \
				(void *)dev, NULL, NULL,                                           \
				K_PRIO_COOP(CONFIG_UDC_AMBIQ_THREAD_PRIORITY), K_ESSENTIAL,        \
				K_NO_WAIT);                                                        \
		k_thread_name_set(&priv->thread_data, dev->name);                                  \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_irq_enable_func_##n(const struct device *dev)                        \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), udc_ambiq_usb_isr,          \
			    DEVICE_DT_INST_GET(n), 0);                                             \
                                                                                                   \
		irq_enable(DT_INST_IRQN(n));                                                       \
	}                                                                                          \
                                                                                                   \
	static void udc_ambiq_irq_disable_func_##n(const struct device *dev)                       \
	{                                                                                          \
		irq_disable(DT_INST_IRQN(n));                                                      \
	}                                                                                          \
	static struct udc_ep_config ep_cfg_out[DT_INST_PROP(n, num_bidir_endpoints)];              \
	static struct udc_ep_config ep_cfg_in[DT_INST_PROP(n, num_bidir_endpoints)];               \
                                                                                                   \
	static const struct udc_ambiq_config udc_ambiq_config_##n = {                              \
		.num_endpoints = DT_INST_PROP(n, num_bidir_endpoints),                             \
		.ep_cfg_in = ep_cfg_in,                                                            \
		.ep_cfg_out = ep_cfg_out,                                                          \
		.speed_idx = DT_ENUM_IDX(DT_DRV_INST(n), maximum_speed),                          \
		.vddusb33_gpio = GPIO_DT_SPEC_GET_OR(DT_DRV_INST(n), vddusb33_gpios, {0}),        \
		.vddusb0p9_gpio = GPIO_DT_SPEC_GET_OR(DT_DRV_INST(n), vddusb0p9_gpios, {0}),      \
		.irq_enable_func = udc_ambiq_irq_enable_func_##n,                                 \
		.irq_disable_func = udc_ambiq_irq_disable_func_##n,                               \
		.make_thread = udc_ambiq_make_thread_##n,                                         \
		.callback_register_func = udc_ambiq_register_callback_##n,                        \
	};                                                                                         \
                                                                                                   \
	static struct udc_ambiq_data udc_priv_##n = {};                                            \
                                                                                                   \
	static struct udc_data udc_data_##n = {                                                    \
		.mutex = Z_MUTEX_INITIALIZER(udc_data_##n.mutex),                                  \
		.priv = &udc_priv_##n,                                                             \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, udc_ambiq_driver_init, NULL, &udc_data_##n,                       \
			      &udc_ambiq_config_##n, POST_KERNEL,                                  \
			      CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &udc_ambiq_api);

DT_INST_FOREACH_STATUS_OKAY(UDC_AMBIQ_DEVICE_DEFINE)