1 /*
2 * Copyright (c) 2024 Renesas Electronics Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /*
8 * Driver for the Smartbond USB device controller.
9 */
10
11 #include <string.h>
12
13 #include "udc_common.h"
14
15 #include <DA1469xAB.h>
16 #include <da1469x_config.h>
17
18 #include <zephyr/kernel.h>
19 #include <zephyr/drivers/dma.h>
20 #include <zephyr/drivers/usb/udc.h>
21 #include <zephyr/drivers/usb/usb_dc.h>
22 #include <zephyr/drivers/clock_control.h>
23 #include <zephyr/drivers/clock_control/smartbond_clock_control.h>
24
25 #include <zephyr/logging/log.h>
26 #include <zephyr/pm/policy.h>
27
28 LOG_MODULE_REGISTER(udc_smartbond, CONFIG_UDC_DRIVER_LOG_LEVEL);
29
/* Size of hardware RX and TX FIFO. */
#define EP0_FIFO_SIZE 8
#define EP_FIFO_SIZE 64

/*
 * DA146xx register fields and bit masks are very long. Field masks repeat register names.
 * These convenience macros are a way to reduce complexity of register modification lines.
 * Arguments and expansions are fully parenthesized so the macros are safe in any
 * expression context (e.g. `GET_BIT(x, F) + 1` or comparisons).
 */
#define GET_BIT(val, field) (((val) & field##_Msk) >> field##_Pos)
#define REG_GET_BIT(reg, field) (USB->reg & USB_##reg##_##field##_Msk)
#define REG_SET_BIT(reg, field) (USB->reg |= USB_##reg##_##field##_Msk)
#define REG_CLR_BIT(reg, field) (USB->reg &= ~USB_##reg##_##field##_Msk)
#define REG_SET_VAL(reg, field, val) \
	(USB->reg = (USB->reg & ~USB_##reg##_##field##_Msk) | ((val) << USB_##reg##_##field##_Pos))
44
/* DMA resources used by the driver: channel numbers, hardware request
 * (slot) mux values and the DMA controller devices for each direction.
 */
struct usb_smartbond_dma_config {
	int tx_chan;                 /* DMA channel number used for USB TX */
	int rx_chan;                 /* DMA channel number used for USB RX */
	uint8_t tx_slot_mux;         /* DMA request slot (dma_slot) for TX */
	uint8_t rx_slot_mux;         /* DMA request slot (dma_slot) for RX */
	const struct device *tx_dev; /* DMA controller servicing TX */
	const struct device *rx_dev; /* DMA controller servicing RX */
};
53
/* Static (build-time) configuration of one UDC instance. */
struct udc_smartbond_config {
	IRQn_Type udc_irq;              /* USB device controller interrupt line */
	IRQn_Type vbus_irq;             /* VBUS change interrupt line */
	/* FIFO read threshold; not referenced in this chunk —
	 * presumably used by the ISR to batch FIFO reads, confirm with caller.
	 */
	uint8_t fifo_read_threshold;
	uint8_t num_of_eps;             /* Endpoints per direction; bounds ep_state lookups */
	/* Transfers must exceed this size for the shared DMA channel to be used */
	uint16_t dma_min_transfer_size;
	struct usb_smartbond_dma_config dma_cfg; /* DMA channel/slot assignment */
};
62
/* Node functional states (written to USB_NFSR_REG, low 2 bits) */
#define NFSR_NODE_RESET 0
#define NFSR_NODE_RESUME 1
#define NFSR_NODE_OPERATIONAL 2
#define NFSR_NODE_SUSPEND 3
/*
 * Those two following states are added to allow going out of sleep mode
 * using frame interrupt. On remote wakeup RESUME state must be kept for
 * at least 1ms. It is accomplished by using FRAME interrupt that goes
 * through those two fake states before entering OPERATIONAL state.
 * Only the low 2 bits (RESUME) reach the hardware register; see set_nfsr().
 */
#define NFSR_NODE_WAKING (0x10 | (NFSR_NODE_RESUME))
#define NFSR_NODE_WAKING2 (0x20 | (NFSR_NODE_RESUME))
76
/*
 * Per-endpoint hardware register window: control/data/status/command
 * registers for the IN half followed by the same set for the OUT half.
 * txd/rxd pop or push one FIFO byte per access (see fill_tx_fifo() and
 * read_rx_fifo()).
 */
struct smartbond_ep_reg_set {
	volatile uint32_t epc_in;  /* IN endpoint control (EPCx) */
	volatile uint32_t txd;     /* TX FIFO data */
	volatile uint32_t txs;     /* TX status (TCOUNT, TX_DONE, ...) */
	volatile uint32_t txc;     /* TX command (TX_EN, LAST, FLUSH, ...) */
	volatile uint32_t epc_out; /* OUT endpoint control */
	volatile uint32_t rxd;     /* RX FIFO data */
	volatile uint32_t rxs;     /* RX status (RCOUNT, RX_LAST, ...) */
	volatile uint32_t rxc;     /* RX command (RX_EN, FLUSH, ...) */
};
87
/* Software state of a single endpoint (one direction). */
struct smartbond_ep_state {
	/* Must stay the first member: the driver casts a struct udc_ep_config *
	 * back to struct smartbond_ep_state * (see udc_smartbond_ep_abort()).
	 */
	struct udc_ep_config config;
	struct smartbond_ep_reg_set *regs; /* Endpoint's hardware registers */
	struct net_buf *buf;               /* Buffer of the transfer in progress */
	/** Packet size sent or received so far. It is used to modify transferred field
	 * after ACK is received or when filling ISO endpoint with size larger than
	 * FIFO size.
	 */
	uint16_t last_packet_size;
	uint8_t iso: 1; /**< ISO endpoint */
};
99
/*
 * DMA configurations built once at init (usb_smartbond_dma_config()) and
 * then reused for every transfer via dma_reload().
 */
struct usb_smartbond_dma_data {
	struct dma_config tx_cfg;
	struct dma_config rx_cfg;
	struct dma_block_config tx_block_cfg; /* Single block descriptor for TX */
	struct dma_block_config rx_block_cfg; /* Single block descriptor for RX */
};
106
/* Run-time state of the UDC driver instance. */
struct usb_smartbond_data {
	struct udc_data udc_data;               /* Common UDC layer state */
	struct usb_smartbond_dma_data dma_data; /* Prepared DMA configurations */
	const struct device *dev;               /* Back-pointer to the UDC device */
	struct k_work ep0_setup_work;           /* Deferred SETUP packet handling */
	struct k_work ep0_tx_work;              /* Deferred EP0 IN completion */
	struct k_work ep0_rx_work;              /* Deferred EP0 OUT completion */
	uint8_t setup_buffer[8];                /* Latest SETUP packet */
	/* vbus_present/attached/clk_requested are not referenced in this chunk;
	 * presumably maintained by the VBUS IRQ / enable paths — confirm there.
	 */
	bool vbus_present;
	bool attached;
	atomic_t clk_requested;
	uint8_t nfsr;                           /* Software copy of node functional state */
	struct smartbond_ep_state ep_state[2][4]; /* [0][n] = OUT EPn, [1][n] = IN EPn */
	atomic_ptr_t dma_ep[2]; /**< Endpoint owning the RX(0)/TX(1) DMA channel */
};
122
/* Shortcuts for EP0 OUT/IN software state; parameter parenthesized for safety. */
#define EP0_OUT_STATE(data) (&(data)->ep_state[0][0])
#define EP0_IN_STATE(data)  (&(data)->ep_state[1][0])
125
usb_smartbond_dma_config(struct usb_smartbond_data * data)126 static int usb_smartbond_dma_config(struct usb_smartbond_data *data)
127 {
128 const struct udc_smartbond_config *config = data->dev->config;
129 const struct usb_smartbond_dma_config *dma_cfg = &config->dma_cfg;
130 struct dma_config *tx = &data->dma_data.tx_cfg;
131 struct dma_config *rx = &data->dma_data.rx_cfg;
132 struct dma_block_config *tx_block = &data->dma_data.tx_block_cfg;
133 struct dma_block_config *rx_block = &data->dma_data.rx_block_cfg;
134
135 if (dma_request_channel(dma_cfg->rx_dev, (void *)&dma_cfg->rx_chan) < 0) {
136 LOG_ERR("RX DMA channel is already occupied");
137 return -EIO;
138 }
139
140 if (dma_request_channel(dma_cfg->tx_dev, (void *)&dma_cfg->tx_chan) < 0) {
141 LOG_ERR("TX DMA channel is already occupied");
142 return -EIO;
143 }
144
145 tx->channel_direction = MEMORY_TO_PERIPHERAL;
146 tx->dma_callback = NULL;
147 tx->user_data = NULL;
148 tx->block_count = 1;
149 tx->head_block = tx_block;
150
151 tx->error_callback_dis = 1;
152 /* DMA callback is not used */
153 tx->complete_callback_en = 1;
154
155 tx->dma_slot = dma_cfg->tx_slot_mux;
156 tx->channel_priority = 7;
157
158 /* Burst mode is not using when DREQ is one */
159 tx->source_burst_length = 1;
160 tx->dest_burst_length = 1;
161 /* USB is byte-oriented protocol */
162 tx->source_data_size = 1;
163 tx->dest_data_size = 1;
164
165 /* Do not change */
166 tx_block->dest_addr_adj = 0x2;
167 /* Incremental */
168 tx_block->source_addr_adj = 0x0;
169
170 /* Should reflect TX buffer */
171 tx_block->source_address = 0;
172 /* Should reflect USB TX FIFO. Temporarily assign an SRAM location. */
173 tx_block->dest_address = MCU_SYSRAM_M_BASE;
174 /* Should reflect total bytes to be transmitted */
175 tx_block->block_size = 0;
176
177 rx->channel_direction = PERIPHERAL_TO_MEMORY;
178 rx->dma_callback = NULL;
179 rx->user_data = NULL;
180 rx->block_count = 1;
181 rx->head_block = rx_block;
182
183 rx->error_callback_dis = 1;
184 /* DMA callback is not used */
185 rx->complete_callback_en = 1;
186
187 rx->dma_slot = dma_cfg->rx_slot_mux;
188 rx->channel_priority = 2;
189
190 /* Burst mode is not using when DREQ is one */
191 rx->source_burst_length = 1;
192 rx->dest_burst_length = 1;
193 /* USB is byte-oriented protocol */
194 rx->source_data_size = 1;
195 rx->dest_data_size = 1;
196
197 /* Do not change */
198 rx_block->source_addr_adj = 0x2;
199 /* Incremental */
200 rx_block->dest_addr_adj = 0x0;
201
202 /* Should reflect USB RX FIFO */
203 rx_block->source_address = 0;
204 /* Should reflect RX buffer. Temporarily assign an SRAM location. */
205 rx_block->dest_address = MCU_SYSRAM_M_BASE;
206 /* Should reflect total bytes to be received */
207 rx_block->block_size = 0;
208
209 if (dma_config(dma_cfg->rx_dev, dma_cfg->rx_chan, rx) < 0) {
210 LOG_ERR("RX DMA configuration failed");
211 return -EINVAL;
212 }
213
214 if (dma_config(dma_cfg->tx_dev, dma_cfg->tx_chan, tx) < 0) {
215 LOG_ERR("TX DMA configuration failed");
216 return -EINVAL;
217 }
218
219 return 0;
220 }
221
usb_smartbond_dma_deconfig(struct usb_smartbond_data * data)222 static void usb_smartbond_dma_deconfig(struct usb_smartbond_data *data)
223 {
224 const struct udc_smartbond_config *config = data->dev->config;
225 const struct usb_smartbond_dma_config *dma_cfg = &config->dma_cfg;
226
227 dma_stop(dma_cfg->tx_dev, dma_cfg->tx_chan);
228 dma_stop(dma_cfg->rx_dev, dma_cfg->rx_chan);
229
230 dma_release_channel(dma_cfg->tx_dev, dma_cfg->tx_chan);
231 dma_release_channel(dma_cfg->rx_dev, dma_cfg->rx_chan);
232 }
233
/* Look up endpoint state by full endpoint address; NULL if index is out of range. */
static struct smartbond_ep_state *usb_dc_get_ep_state(struct usb_smartbond_data *data, uint8_t ep)
{
	const struct udc_smartbond_config *config = data->dev->config;
	const uint8_t idx = USB_EP_GET_IDX(ep);
	const uint8_t dir = USB_EP_GET_DIR(ep) ? 1 : 0;

	if (idx >= config->num_of_eps) {
		return NULL;
	}

	return &data->ep_state[dir][idx];
}
243
/* Look up OUT endpoint state by index; NULL if index is out of range. */
static struct smartbond_ep_state *usb_dc_get_ep_out_state(struct usb_smartbond_data *data,
							  uint8_t ep_idx)
{
	const struct udc_smartbond_config *config = data->dev->config;

	if (ep_idx >= config->num_of_eps) {
		return NULL;
	}

	/* Row 0 holds the OUT direction. */
	return &data->ep_state[0][ep_idx];
}
251
/* Look up IN endpoint state by index; NULL if index is out of range. */
static struct smartbond_ep_state *usb_dc_get_ep_in_state(struct usb_smartbond_data *data,
							 uint8_t ep_idx)
{
	const struct udc_smartbond_config *config = data->dev->config;

	if (ep_idx >= config->num_of_eps) {
		return NULL;
	}

	/* Row 1 holds the IN direction. */
	return &data->ep_state[1][ep_idx];
}
259
/* Record the node functional state and program the hardware register. */
static void set_nfsr(struct usb_smartbond_data *data, uint8_t val)
{
	/*
	 * Only the lower 2 bits reach the register; the higher bits are a
	 * software countdown used until OPERATIONAL can be entered when
	 * remote wakeup is activated.
	 */
	const uint8_t hw_bits = val & 3;

	data->nfsr = val;
	USB->USB_NFSR_REG = hw_bits;
}
270
/*
 * Push as much of the current packet as possible into the endpoint TX FIFO.
 *
 * Writes at most one max-packet worth of data, continuing from
 * last_packet_size. For non-EP0 endpoints whose MPS exceeds the 64-byte
 * hardware FIFO (ISO case), the FIFO level warning interrupt is armed so
 * the remainder can be pushed later; otherwise the packet is marked LAST.
 */
static void fill_tx_fifo(struct smartbond_ep_state *ep_state)
{
	int remaining;
	const uint8_t *src;
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct net_buf *buf = ep_state->buf;
	const struct udc_ep_config *const ep_cfg = &ep_state->config;
	const uint16_t mps = udc_mps_ep_size(ep_cfg);
	const uint8_t ep_idx = USB_EP_GET_IDX(ep_cfg->addr);

	src = buf->data;
	remaining = buf->len;
	/* Cap at what is left of the current packet. */
	if (remaining > mps - ep_state->last_packet_size) {
		remaining = mps - ep_state->last_packet_size;
	}

	/*
	 * Loop checks TCOUNT all the time since this value is saturated to 31
	 * and can't be read just once before.
	 */
	while ((regs->txs & USB_USB_TXS1_REG_USB_TCOUNT_Msk) > 0 && remaining > 0) {
		regs->txd = *src++;
		ep_state->last_packet_size++;
		remaining--;
	}

	/*
	 * Setup FIFO level warning in case whole packet could not be placed
	 * in FIFO at once. This case only applies to ISO endpoints with packet
	 * size greater than 64. All other packets will fit in corresponding
	 * FIFO and there is no need for enabling FIFO level interrupt.
	 */
	if (ep_idx == 0 || ep_cfg->mps <= EP_FIFO_SIZE) {
		return;
	}

	if (remaining > 0) {
		/*
		 * Max packet size is set to value greater then FIFO.
		 * Enable fifo level warning to handle larger packets.
		 */
		regs->txc |= (3 << USB_USB_TXC1_REG_USB_TFWL_Pos);
		USB->USB_FWMSK_REG |= BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_TXWARN31_Pos);
	} else {
		regs->txc &= ~USB_USB_TXC1_REG_USB_TFWL_Msk;
		USB->USB_FWMSK_REG &= ~(BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_TXWARN31_Pos));
		/* Whole packet already in fifo, no need to
		 * refill it later. Mark last.
		 */
		regs->txc |= USB_USB_TXC1_REG_USB_LAST_Msk;
	}
}
323
try_allocate_dma(struct usb_smartbond_data * data,struct smartbond_ep_state * ep_state)324 static bool try_allocate_dma(struct usb_smartbond_data *data, struct smartbond_ep_state *ep_state)
325 {
326 struct udc_ep_config *const ep_cfg = &ep_state->config;
327 const uint8_t ep = ep_cfg->addr;
328 uint8_t ep_idx = USB_EP_GET_IDX(ep);
329 uint8_t dir_ix = USB_EP_DIR_IS_OUT(ep) ? 0 : 1;
330
331 if (atomic_ptr_cas(&data->dma_ep[dir_ix], NULL, ep_state)) {
332 if (dir_ix == 0) {
333 USB->USB_DMA_CTRL_REG =
334 (USB->USB_DMA_CTRL_REG & ~USB_USB_DMA_CTRL_REG_USB_DMA_RX_Msk) |
335 ((ep_idx - 1) << USB_USB_DMA_CTRL_REG_USB_DMA_RX_Pos);
336 } else {
337 USB->USB_DMA_CTRL_REG =
338 (USB->USB_DMA_CTRL_REG & ~USB_USB_DMA_CTRL_REG_USB_DMA_TX_Msk) |
339 ((ep_idx - 1) << USB_USB_DMA_CTRL_REG_USB_DMA_TX_Pos);
340 }
341 USB->USB_DMA_CTRL_REG |= USB_USB_DMA_CTRL_REG_USB_DMA_EN_Msk;
342 return true;
343 } else {
344 return false;
345 }
346 }
347
/* Reload the RX DMA channel with new addresses/size and start it. */
static void start_rx_dma(const struct usb_smartbond_dma_config *dma_cfg, uintptr_t src,
			 uintptr_t dst, uint16_t size)
{
	if (dma_reload(dma_cfg->rx_dev, dma_cfg->rx_chan, src, dst, size) >= 0) {
		dma_start(dma_cfg->rx_dev, dma_cfg->rx_chan);
		return;
	}

	LOG_ERR("Failed to reload RX DMA");
}
357
/*
 * Arm reception of a single packet on an OUT endpoint.
 *
 * When MPS exceeds the DMA threshold, try to claim the shared RX DMA
 * channel; if it is taken and MPS does not fit the 64-byte FIFO, fall
 * back to the FIFO level warning interrupt. Finally enables reception
 * by writing RX_EN to the endpoint's RXC register.
 */
static void start_rx_packet(struct usb_smartbond_data *data, struct smartbond_ep_state *ep_state)
{
	struct udc_ep_config *const ep_cfg = &ep_state->config;
	const struct udc_smartbond_config *config = data->dev->config;
	const uint8_t ep = ep_cfg->addr;
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct net_buf *buf = ep_state->buf;
	uint8_t ep_idx = USB_EP_GET_IDX(ep);
	const uint16_t mps = udc_mps_ep_size(ep_cfg);
	uint8_t rxc = regs->rxc | USB_USB_RXC1_REG_USB_RX_EN_Msk;

	LOG_DBG("Start rx ep 0x%02x", ep);

	ep_state->last_packet_size = 0;

	if (mps > config->dma_min_transfer_size) {
		if (try_allocate_dma(data, ep_state)) {
			/* DMA drains the FIFO straight into the buffer tail. */
			start_rx_dma(&config->dma_cfg, (uintptr_t)&regs->rxd,
				     (uintptr_t)net_buf_tail(buf), mps);
		} else if (mps > EP_FIFO_SIZE) {
			/*
			 * Other endpoint is using DMA in that direction,
			 * fall back to interrupts.
			 * For endpoint size greater than FIFO size,
			 * enable FIFO level warning interrupt when FIFO
			 * has less than 17 bytes free.
			 */
			rxc |= USB_USB_RXC1_REG_USB_RFWL_Msk;
			USB->USB_FWMSK_REG |=
				BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_RXWARN31_Pos);
		}
	} else if (ep_idx != 0) {
		/* If max_packet_size would fit in FIFO no need
		 * for FIFO level warning interrupt.
		 */
		rxc &= ~USB_USB_RXC1_REG_USB_RFWL_Msk;
		USB->USB_FWMSK_REG &= ~(BIT(ep_idx - 1 + USB_USB_FWMSK_REG_USB_M_RXWARN31_Pos));
	}

	regs->rxc = rxc;
}
399
/* Reload the TX DMA channel with new addresses/size and start it. */
static void start_tx_dma(const struct usb_smartbond_dma_config *dma_cfg, uintptr_t src,
			 uintptr_t dst, uint16_t size)
{
	if (dma_reload(dma_cfg->tx_dev, dma_cfg->tx_chan, src, dst, size) >= 0) {
		dma_start(dma_cfg->tx_dev, dma_cfg->tx_chan);
		return;
	}

	LOG_ERR("Failed to reload TX DMA");
}
409
/*
 * Start transmission of one packet (at most MPS bytes) on an IN endpoint.
 *
 * The FIFO is flushed, then filled either by the shared TX DMA channel
 * (non-EP0, transfer above the DMA threshold, buffer located in SRAM) or
 * byte-by-byte by the CPU. TX is enabled last. For EP0 the EP0_NAK
 * interrupt is unmasked to catch a host that terminates the data stage
 * early with a ZLP.
 */
static void start_tx_packet(struct usb_smartbond_data *data, struct smartbond_ep_state *ep_state)
{
	const struct udc_smartbond_config *config = data->dev->config;
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct udc_ep_config *const ep_cfg = &ep_state->config;
	struct net_buf *buf = ep_state->buf;
	const uint8_t ep = ep_cfg->addr;
	uint16_t remaining = buf->len;
	const uint16_t mps = udc_mps_ep_size(ep_cfg);
	uint16_t size = MIN(remaining, mps);
	uint8_t txc;

	LOG_DBG("ep 0x%02x %d/%d", ep, size, remaining);

	ep_state->last_packet_size = 0;

	/* Drop any stale bytes before loading the new packet. */
	regs->txc = USB_USB_TXC1_REG_USB_FLUSH_Msk;

	txc = USB_USB_TXC1_REG_USB_TX_EN_Msk | USB_USB_TXC1_REG_USB_LAST_Msk;
	if (ep_cfg->stat.data1) {
		txc |= USB_USB_TXC1_REG_USB_TOGGLE_TX_Msk;
	}

	/* DMA requires the source to live in SRAM (not flash/QSPI). */
	if (ep != USB_CONTROL_EP_IN && size > config->dma_min_transfer_size &&
	    (uint32_t)(buf->data) >= CONFIG_SRAM_BASE_ADDRESS && try_allocate_dma(data, ep_state)) {
		start_tx_dma(&config->dma_cfg, (uintptr_t)buf->data, (uintptr_t)&regs->txd, size);
	} else {
		fill_tx_fifo(ep_state);
	}

	regs->txc = txc;
	if (ep == USB_CONTROL_EP_IN) {
		/* Read clears pending EP0 NAK status before unmasking it. */
		(void)USB->USB_EP0_NAK_REG;
		/*
		 * While driver expects upper layer to send data to the host,
		 * code should detect EP0 NAK event that could mean that
		 * host already sent ZLP without waiting for all requested
		 * data.
		 */
		REG_SET_BIT(USB_MAMSK_REG, USB_M_EP0_NAK);
	}
}
452
/*
 * Drain up to bytes_in_fifo bytes from the endpoint RX FIFO into dst,
 * bounded by the space left in the current packet. Updates
 * last_packet_size and returns the count of bytes left unread in the FIFO.
 */
static uint16_t read_rx_fifo(struct smartbond_ep_state *ep_state, uint8_t *dst,
			     uint16_t bytes_in_fifo)
{
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct udc_ep_config *const ep_cfg = &ep_state->config;
	const uint16_t mps = udc_mps_ep_size(ep_cfg);
	const uint16_t space_left = mps - ep_state->last_packet_size;
	const uint16_t to_read = (bytes_in_fifo < space_left) ? bytes_in_fifo : space_left;

	/* Each rxd access pops one byte from the hardware FIFO. */
	for (uint16_t i = 0; i < to_read; ++i) {
		dst[i] = regs->rxd;
	}

	ep_state->last_packet_size += to_read;

	return bytes_in_fifo - to_read;
}
474
/*
 * Handle an EP0 OUT event: either a SETUP packet or an OUT data chunk.
 *
 * A SETUP packet is copied into data->setup_buffer, both EP0 halves are
 * un-stalled and their toggles reset to DATA1, and processing is deferred
 * to ep0_setup_work. OUT data is accumulated into the queued buffer; a
 * short packet (or exhausted buffer) completes the stage via ep0_rx_work,
 * otherwise reception of the next packet is re-armed.
 */
static void handle_ep0_rx(struct usb_smartbond_data *data)
{
	int fifo_bytes;
	uint32_t rxs0 = USB->USB_RXS0_REG;
	struct smartbond_ep_state *ep0_out_state = EP0_OUT_STATE(data);
	struct udc_ep_config *ep0_out_config = &ep0_out_state->config;
	struct smartbond_ep_state *ep0_in_state;
	struct udc_ep_config *ep0_in_config;
	struct net_buf *buf = ep0_out_state->buf;

	fifo_bytes = GET_BIT(rxs0, USB_USB_RXS0_REG_USB_RCOUNT);

	if (rxs0 & USB_USB_RXS0_REG_USB_SETUP_Msk) {
		ep0_in_state = EP0_IN_STATE(data);
		ep0_in_config = &ep0_in_state->config;
		ep0_out_state->last_packet_size = 0;
		read_rx_fifo(ep0_out_state, data->setup_buffer, EP0_FIFO_SIZE);

		/* SETUP clears any halt; first data-stage packet is DATA1. */
		ep0_out_config->stat.halted = 0;
		ep0_out_config->stat.data1 = 1;
		ep0_in_config->stat.halted = 0;
		ep0_in_config->stat.data1 = 1;
		REG_SET_BIT(USB_TXC0_REG, USB_TOGGLE_TX0);
		REG_CLR_BIT(USB_EPC0_REG, USB_STALL);
		LOG_HEXDUMP_DBG(data->setup_buffer, 8, "setup");
		REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK);
		REG_CLR_BIT(USB_RXC0_REG, USB_RX_EN);
		/* Read clears pending EP0 NAK status. */
		(void)USB->USB_EP0_NAK_REG;
		k_work_submit_to_queue(udc_get_work_q(), &data->ep0_setup_work);
	} else {
		(void)USB->USB_EP0_NAK_REG;
		if (GET_BIT(rxs0, USB_USB_RXS0_REG_USB_TOGGLE_RX0) != ep0_out_config->stat.data1) {
			/* Toggle bit does not match discard packet */
			REG_SET_BIT(USB_RXC0_REG, USB_FLUSH);
			ep0_out_state->last_packet_size = 0;
			LOG_WRN("Packet with incorrect data1 bit rejected");
		} else {
			read_rx_fifo(ep0_out_state,
				     net_buf_tail(buf) + ep0_out_state->last_packet_size,
				     fifo_bytes);
			if (rxs0 & USB_USB_RXS0_REG_USB_RX_LAST_Msk) {
				ep0_out_config->stat.data1 ^= 1;
				net_buf_add(ep0_out_state->buf, ep0_out_state->last_packet_size);
				/* Short packet ends the OUT stage.
				 * NOTE(review): the `buf->len == 0` arm looks like it was
				 * meant to be a tailroom/full-buffer check — confirm.
				 */
				if (ep0_out_state->last_packet_size < EP0_FIFO_SIZE ||
				    ep0_out_state->buf->len == 0) {
					k_work_submit_to_queue(udc_get_work_q(),
							       &data->ep0_rx_work);
				} else {
					start_rx_packet(data, ep0_out_state);
				}
			}
		}
	}
}
529
/*
 * Abort whatever transfer is active on the endpoint: reclaim the shared
 * DMA channel if this endpoint owns it, then pulse the FIFO FLUSH bit.
 * The only caller in this file (udc_smartbond_ep_dequeue()) invokes it
 * with interrupts locked.
 *
 * NOTE(review): dma_ep[] is read and cleared with plain accesses here
 * while try_allocate_dma() uses atomic_ptr_cas() — presumably safe under
 * the caller's irq_lock(), but worth confirming.
 */
static void udc_smartbond_ep_abort(const struct device *dev, struct udc_ep_config *const ep_cfg)
{
	/* ep_cfg is the first member of smartbond_ep_state, so the cast is valid */
	struct smartbond_ep_state *ep_state = (struct smartbond_ep_state *)(ep_cfg);
	struct usb_smartbond_data *data = udc_get_private(dev);
	const struct udc_smartbond_config *config = data->dev->config;

	/* Stop DMA if it used by this endpoint */
	if (data->dma_ep[0] == ep_state) {
		dma_stop(config->dma_cfg.rx_dev, config->dma_cfg.rx_chan);
		data->dma_ep[0] = NULL;
	} else if (data->dma_ep[1] == ep_state) {
		dma_stop(config->dma_cfg.tx_dev, config->dma_cfg.tx_chan);
		data->dma_ep[1] = NULL;
	}
	/* Flush FIFO */
	if (USB_EP_DIR_IS_OUT(ep_cfg->addr)) {
		ep_state->regs->rxc |= USB_USB_RXC0_REG_USB_FLUSH_Msk;
		ep_state->regs->rxc &= ~USB_USB_RXC0_REG_USB_FLUSH_Msk;
	} else {
		ep_state->regs->txc |= USB_USB_TXC0_REG_USB_FLUSH_Msk;
		ep_state->regs->txc &= ~USB_USB_TXC0_REG_USB_FLUSH_Msk;
	}
}
553
/*
 * Kick off transmission on an IN endpoint if it is idle, not stalled,
 * and a buffer is queued. Always returns 0.
 */
static int udc_smartbond_ep_tx(const struct device *dev, uint8_t ep)
{
	struct usb_smartbond_data *data = dev->data;
	struct smartbond_ep_state *ep_state = usb_dc_get_ep_in_state(data, USB_EP_GET_IDX(ep));
	struct net_buf *buf;

	/* Busy or stalled endpoints are left alone. */
	if (udc_ep_is_busy(dev, ep) ||
	    (ep_state->regs->epc_in & USB_USB_EPC1_REG_USB_STALL_Msk) != 0) {
		return 0;
	}

	buf = udc_buf_peek(dev, ep);
	LOG_DBG("TX ep 0x%02x len %u", ep, buf ? buf->len : -1);

	if (buf == NULL) {
		return 0;
	}

	ep_state->buf = buf;
	ep_state->last_packet_size = 0;

	start_tx_packet(data, ep_state);

	udc_ep_set_busy(dev, ep, true);

	return 0;
}
579
/*
 * Arm reception on an OUT endpoint if it is idle and a buffer is queued.
 * Always returns 0.
 */
static int udc_smartbond_ep_rx(const struct device *dev, uint8_t ep)
{
	struct usb_smartbond_data *data = dev->data;
	struct smartbond_ep_state *ep_state = usb_dc_get_ep_out_state(data, USB_EP_GET_IDX(ep));
	struct net_buf *buf;

	if (udc_ep_is_busy(dev, ep)) {
		return 0;
	}

	buf = udc_buf_peek(dev, ep);
	if (buf == NULL) {
		return 0;
	}

	LOG_DBG("RX ep 0x%02x len %u", ep, buf->size);

	ep_state->last_packet_size = 0;
	ep_state->buf = buf;

	start_rx_packet(data, ep_state);

	udc_ep_set_busy(dev, ep, true);

	return 0;
}
605
udc_smartbond_ep_enqueue(const struct device * dev,struct udc_ep_config * const ep_cfg,struct net_buf * buf)606 static int udc_smartbond_ep_enqueue(const struct device *dev, struct udc_ep_config *const ep_cfg,
607 struct net_buf *buf)
608 {
609 unsigned int lock_key;
610 const uint8_t ep = ep_cfg->addr;
611 int ret;
612
613 LOG_DBG("ep 0x%02x enqueue %p", ep, buf);
614 udc_buf_put(ep_cfg, buf);
615
616 if (ep_cfg->stat.halted) {
617 /*
618 * It is fine to enqueue a transfer for a halted endpoint,
619 * you need to make sure that transfers are re-triggered when
620 * the halt is cleared.
621 */
622 LOG_DBG("ep 0x%02x halted", ep);
623 return 0;
624 }
625
626 lock_key = irq_lock();
627
628 if (USB_EP_DIR_IS_IN(ep)) {
629 ret = udc_smartbond_ep_tx(dev, ep);
630 } else {
631 ret = udc_smartbond_ep_rx(dev, ep);
632 }
633
634 irq_unlock(lock_key);
635
636 return ret;
637 }
638
udc_smartbond_ep_dequeue(const struct device * dev,struct udc_ep_config * const ep_cfg)639 static int udc_smartbond_ep_dequeue(const struct device *dev, struct udc_ep_config *const ep_cfg)
640 {
641 const uint8_t ep = ep_cfg->addr;
642 unsigned int lock_key;
643 struct net_buf *buf;
644
645 LOG_INF("ep 0x%02x dequeue all", ep);
646
647 lock_key = irq_lock();
648
649 udc_smartbond_ep_abort(dev, ep_cfg);
650
651 buf = udc_buf_get_all(dev, ep);
652 if (buf) {
653 udc_submit_ep_event(dev, buf, -ECONNABORTED);
654 }
655
656 udc_ep_set_busy(dev, ep, false);
657
658 irq_unlock(lock_key);
659
660 return 0;
661 }
662
/*
 * Enable an endpoint: clear the IGN (ignore token) bits, program the EPC
 * register with endpoint number and ISO flag, and unmask the endpoint's
 * TX/RX event interrupts. EP0 only needs its IGN bits cleared.
 */
int udc_smartbond_ep_enable(const struct device *dev, struct udc_ep_config *const ep_cfg)
{
	const uint8_t ep = ep_cfg->addr;
	struct smartbond_ep_state *ep_state;
	bool iso = (ep_cfg->attributes & USB_EP_TRANSFER_TYPE_MASK) == USB_EP_TYPE_ISO;
	uint8_t ep_idx = USB_EP_GET_IDX(ep);

	ARG_UNUSED(dev);

	LOG_INF("Enable ep 0x%02x", ep);

	/* ep_cfg is the first member of smartbond_ep_state, so the cast is valid */
	ep_state = (struct smartbond_ep_state *)(ep_cfg);
	if (USB_EP_DIR_IS_IN(ep)) {
		ep_state->regs->txc &= ~USB_USB_TXC0_REG_USB_IGN_IN_Msk;
		if (ep != USB_CONTROL_EP_IN) {
			ep_state->regs->epc_in |= USB_USB_EPC1_REG_USB_EP_EN_Msk |
						  USB_EP_GET_IDX(ep) |
						  (iso ? USB_USB_EPC2_REG_USB_ISO_Msk : 0);
			/* NOTE(review): 0x11 presumably sets this EP's event bit in
			 * both nibbles of TXMSK — confirm against DA1469x register map.
			 */
			USB->USB_TXMSK_REG |= 0x11 << (ep_idx - 1);
			REG_SET_BIT(USB_MAMSK_REG, USB_M_TX_EV);
		}
	} else {
		ep_state->regs->rxc &= ~USB_USB_RXC0_REG_USB_IGN_OUT_Msk;
		if (ep == USB_CONTROL_EP_OUT) {
			/* EP0 must also accept SETUP tokens. */
			ep_state->regs->rxc &= ~USB_USB_RXC0_REG_USB_IGN_SETUP_Msk;
		} else {
			ep_state->regs->epc_out = USB_USB_EPC2_REG_USB_EP_EN_Msk |
						  USB_EP_GET_IDX(ep) |
						  (iso ? USB_USB_EPC2_REG_USB_ISO_Msk : 0);
			USB->USB_RXMSK_REG |= 0x11 << (ep_idx - 1);
			REG_SET_BIT(USB_MAMSK_REG, USB_M_RX_EV);
		}
	}

	return 0;
}
699
udc_smartbond_ep_disable(const struct device * dev,struct udc_ep_config * const ep_cfg)700 static int udc_smartbond_ep_disable(const struct device *dev, struct udc_ep_config *const ep_cfg)
701 {
702 struct usb_smartbond_data *data = udc_get_private(dev);
703 const uint8_t ep = ep_cfg->addr;
704 struct smartbond_ep_state *ep_state;
705
706 LOG_INF("Disable ep 0x%02x", ep);
707
708 ep_state = usb_dc_get_ep_state(data, ep);
709 if (USB_EP_DIR_IS_IN(ep)) {
710 ep_state->regs->txc =
711 USB_USB_TXC0_REG_USB_IGN_IN_Msk | USB_USB_TXC0_REG_USB_FLUSH_Msk;
712 } else {
713 ep_state->regs->rxc =
714 USB_USB_RXC0_REG_USB_IGN_SETUP_Msk | USB_USB_RXC0_REG_USB_IGN_OUT_Msk;
715 }
716
717 return 0;
718 }
719
/*
 * Stall an endpoint.
 *
 * EP0 needs special handling: stalling IN during the data stage also
 * discards the pending status OUT buffer, and TX is re-enabled so the
 * STALL handshake is actually sent. EP0 IN and OUT share the single
 * EPC0 register, which is why the OUT branch writes via epc_in.
 */
static int udc_smartbond_ep_set_halt(const struct device *dev, struct udc_ep_config *const ep_cfg)
{
	/* ep_cfg is the first member of smartbond_ep_state, so the cast is valid */
	struct smartbond_ep_state *ep_state = (struct smartbond_ep_state *)(ep_cfg);
	struct net_buf *buf;
	const uint8_t ep = ep_cfg->addr;

	LOG_DBG("Set halt ep 0x%02x", ep);

	ep_cfg->stat.halted = 1;
	if (ep_cfg->addr == USB_CONTROL_EP_IN) {
		/* Stall in DATA IN phase, drop status OUT packet */
		if (udc_ctrl_stage_is_data_in(dev)) {
			buf = udc_buf_get(dev, USB_CONTROL_EP_OUT);
			if (buf) {
				net_buf_unref(buf);
			}
		}
		USB->USB_RXC0_REG = USB_USB_RXC0_REG_USB_FLUSH_Msk;
		USB->USB_EPC0_REG |= USB_USB_EPC0_REG_USB_STALL_Msk;
		/* TX must be enabled for the STALL handshake to go out. */
		USB->USB_TXC0_REG |= USB_USB_TXC0_REG_USB_TX_EN_Msk;
	} else if (ep == USB_CONTROL_EP_OUT) {
		ep_state->regs->rxc |= USB_USB_RXC0_REG_USB_RX_EN_Msk;
		/* EP0 OUT stalls via the shared EPC0 register (epc_in slot). */
		ep_state->regs->epc_in |= USB_USB_EPC0_REG_USB_STALL_Msk;
	} else if (USB_EP_DIR_IS_OUT(ep)) {
		ep_state->regs->epc_out = USB_USB_EPC1_REG_USB_STALL_Msk;
		ep_state->regs->rxc = USB_USB_RXC1_REG_USB_RX_EN_Msk;
	} else {
		ep_state->regs->epc_in |= USB_USB_EPC1_REG_USB_STALL_Msk;
		ep_state->regs->txc =
			USB_USB_TXC1_REG_USB_TX_EN_Msk | USB_USB_TXC1_REG_USB_LAST_Msk;
	}

	return 0;
}
754
udc_smartbond_ep_clear_halt(const struct device * dev,struct udc_ep_config * const ep_cfg)755 static int udc_smartbond_ep_clear_halt(const struct device *dev, struct udc_ep_config *const ep_cfg)
756 {
757 const uint8_t ep = ep_cfg->addr;
758 struct smartbond_ep_state *ep_state = (struct smartbond_ep_state *)(ep_cfg);
759
760 LOG_DBG("Clear halt ep 0x%02x", ep);
761
762 ep_cfg->stat.data1 = 0;
763 ep_cfg->stat.halted = 0;
764
765 if (ep == USB_CONTROL_EP_OUT || ep == USB_CONTROL_EP_IN) {
766 REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK);
767 }
768
769 if (USB_EP_DIR_IS_OUT(ep)) {
770 ep_state->regs->epc_out &= ~USB_USB_EPC1_REG_USB_STALL_Msk;
771 udc_smartbond_ep_rx(dev, ep);
772 } else {
773 ep_state->regs->epc_in &= ~USB_USB_EPC1_REG_USB_STALL_Msk;
774 udc_smartbond_ep_tx(dev, ep);
775 }
776
777 return 0;
778 }
779
udc_smartbond_set_address(const struct device * dev,const uint8_t addr)780 static int udc_smartbond_set_address(const struct device *dev, const uint8_t addr)
781 {
782 ARG_UNUSED(dev);
783
784 LOG_DBG("Set new address %u for %p", addr, dev);
785
786 USB->USB_FAR_REG = (addr & USB_USB_FAR_REG_USB_AD_Msk) | USB_USB_FAR_REG_USB_AD_EN_Msk;
787
788 return 0;
789 }
790
udc_smartbond_host_wakeup(const struct device * dev)791 static int udc_smartbond_host_wakeup(const struct device *dev)
792 {
793 struct usb_smartbond_data *data = udc_get_private(dev);
794
795 LOG_DBG("Remote wakeup from %p", dev);
796
797 if (data->nfsr == NFSR_NODE_SUSPEND) {
798 /*
799 * Enter fake state that will use FRAME interrupt to wait before
800 * going operational.
801 */
802 set_nfsr(data, NFSR_NODE_WAKING);
803 USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_FRAME_Msk;
804 }
805
806 return 0;
807 }
808
/* The Smartbond controller supports full-speed operation only. */
static enum udc_bus_speed udc_smartbond_device_speed(const struct device *dev)
{
	ARG_UNUSED(dev);

	return UDC_BUS_SPEED_FS;
}
815
udc_smartbond_shutdown(const struct device * dev)816 static int udc_smartbond_shutdown(const struct device *dev)
817 {
818 if (udc_ep_disable_internal(dev, USB_CONTROL_EP_OUT)) {
819 LOG_ERR("Failed to disable control endpoint");
820 return -EIO;
821 }
822
823 if (udc_ep_disable_internal(dev, USB_CONTROL_EP_IN)) {
824 LOG_ERR("Failed to disable control endpoint");
825 return -EIO;
826 }
827
828 return 0;
829 }
830
/*
 * While in RESET state, decide whether the bus reset has finished.
 *
 * The RESET bit in ALTEV may be stale, so it is confirmed with a second
 * register read. Once the bit is observed clear, RESET/SD3 interrupts
 * are re-armed, the EP0 RX interrupt is unmasked when a setup transfer
 * is pending, and the node goes OPERATIONAL. Returns the combined ALTEV
 * value so the caller can process the remaining (non-reset) events.
 */
static uint32_t check_reset_end(struct usb_smartbond_data *data, uint32_t alt_ev)
{
	if (data->nfsr == NFSR_NODE_RESET) {
		if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESET)) {
			/*
			 * Could be still in reset, but since USB_M_RESET is
			 * disabled it can be also old reset state that was not
			 * cleared yet.
			 * If (after reading USB_ALTEV_REG register again)
			 * bit is cleared reset state just ended.
			 * Keep non-reset bits combined from two previous
			 * ALTEV reads and one from the next line.
			 */
			alt_ev = (alt_ev & ~USB_USB_ALTEV_REG_USB_RESET_Msk) | USB->USB_ALTEV_REG;
		}

		if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESET) == 0) {
			USB->USB_ALTMSK_REG =
				USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTEV_REG_USB_SD3_Msk;
			/* EP0 OUT has a buffer queued: re-enable its RX interrupt. */
			if (data->ep_state[0][0].buf != NULL) {
				USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk;
			}
			LOG_DBG("Set operational %02x", USB->USB_MAMSK_REG);
			set_nfsr(data, NFSR_NODE_OPERATIONAL);
		}
	}
	return alt_ev;
}
859
/*
 * Handle an EP0 IN (TX) event.
 *
 * On an ACK'ed packet: consume the bytes just sent, flip DATA0/1 and
 * either load the next packet, send a protocol ZLP, or defer completion
 * to ep0_tx_work. If the host already NAK'ed an OUT (it ended the data
 * stage early), the remaining data is dropped. A packet that was not
 * ACK'ed is retransmitted from the beginning.
 */
void handle_ep0_tx(struct usb_smartbond_data *data)
{
	uint32_t txs0;
	struct smartbond_ep_state *ep0_in_state = EP0_IN_STATE(data);
	const uint8_t ep = USB_CONTROL_EP_IN;
	struct smartbond_ep_reg_set *regs = ep0_in_state->regs;
	struct udc_ep_config *ep0_in_config = &ep0_in_state->config;
	struct net_buf *buf = ep0_in_state->buf;
	bool start_next_packet = true;

	txs0 = regs->txs;

	LOG_DBG("%02x %02x", ep, txs0);

	if (GET_BIT(txs0, USB_USB_TXS0_REG_USB_TX_DONE)) {
		/* ACK received */
		if (GET_BIT(txs0, USB_USB_TXS0_REG_USB_ACK_STAT)) {
			net_buf_pull(buf, ep0_in_state->last_packet_size);
			ep0_in_state->last_packet_size = 0;
			ep0_in_config->stat.data1 ^= 1;
			REG_SET_VAL(USB_TXC0_REG, USB_TOGGLE_TX0, ep0_in_config->stat.data1);

			/*
			 * Packet was sent to host but host already sent OUT packet
			 * that was NAK'ed. It means that no more data is needed.
			 */
			if (USB->USB_EP0_NAK_REG & USB_USB_EP0_NAK_REG_USB_EP0_OUTNAK_Msk) {
				net_buf_pull(buf, buf->len);
				udc_ep_buf_clear_zlp(buf);
			}
			if (buf->len == 0) {
				/* When everything was sent there is not need to fill new packet */
				start_next_packet = false;
				/* Send ZLP if protocol needs it */
				if (udc_ep_buf_has_zlp(buf)) {
					udc_ep_buf_clear_zlp(buf);
					/* Enable transmitter without putting anything in FIFO */
					USB->USB_TXC0_REG |= USB_USB_TXC0_REG_USB_TX_EN_Msk;
				} else {
					REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK);
					k_work_submit_to_queue(udc_get_work_q(),
							       &data->ep0_tx_work);
				}
			}
		} else {
			/* Start from the beginning */
			ep0_in_state->last_packet_size = 0;
		}
		if (start_next_packet) {
			start_tx_packet(data, ep0_in_state);
		}
	}
}
913
/*
 * Service an RX event on a non-control OUT endpoint (ep_idx 1..3).
 *
 * Reads the endpoint RX status, drains the hardware FIFO into the current
 * net_buf (accounting for bytes already moved by DMA), verifies the DATA0/1
 * toggle and, when a transfer completes, hands the buffer to the UDC stack.
 * Repeats while the FIFO still holds more bytes than the configured read
 * threshold.
 *
 * @param data   Driver instance data
 * @param ep_idx Endpoint index (1..3)
 */
static void handle_epx_rx_ev(struct usb_smartbond_data *data, uint8_t ep_idx)
{
	uint32_t rxs;
	int fifo_bytes;
	struct smartbond_ep_state *ep_state = usb_dc_get_ep_out_state(data, ep_idx);
	const struct udc_smartbond_config *config = data->dev->config;
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct udc_ep_config *const ep_cfg = &ep_state->config;
	struct net_buf *buf = ep_state->buf;

	do {
		rxs = regs->rxs;

		if (GET_BIT(rxs, USB_USB_RXS1_REG_USB_RX_ERR)) {
			/* Reception error: drop the packet by flushing the FIFO */
			regs->rxc |= USB_USB_RXC1_REG_USB_FLUSH_Msk;
			ep_state->last_packet_size = 0;
			if (data->dma_ep[0] == ep_state) {
				/* Stop DMA */
				dma_stop(config->dma_cfg.rx_dev, config->dma_cfg.rx_chan);
				/* Restart DMA since packet was dropped,
				 * all parameters should still work.
				 */
				dma_start(config->dma_cfg.rx_dev, config->dma_cfg.rx_chan);
			}
			break;
		}

		if (data->dma_ep[0] == ep_state) {
			struct dma_status rx_dma_status;

			dma_get_status(config->dma_cfg.rx_dev, config->dma_cfg.rx_chan,
				       &rx_dma_status);
			/*
			 * Disable DMA and update last_packet_size
			 * with what DMA reported.
			 */
			dma_stop(config->dma_cfg.rx_dev, config->dma_cfg.rx_chan);
			ep_state->last_packet_size = rx_dma_status.total_copied;

			/*
			 * When DMA did not finish (packet was smaller than MPS),
			 * the DMA index holds the exact number of bytes transferred.
			 * When DMA finished, the index is one less than the actual
			 * number of transferred bytes, hence the increment below.
			 */
			if (ep_state->last_packet_size ==
			    (rx_dma_status.total_copied + rx_dma_status.pending_length)) {
				/* Condition holds iff pending_length == 0 (DMA completed) */
				ep_state->last_packet_size++;
			}
			/* Release DMA for use by other endpoints. */
			data->dma_ep[0] = NULL;
		}
		fifo_bytes = GET_BIT(rxs, USB_USB_RXS1_REG_USB_RXCOUNT);
		/*
		 * FIFO may be empty if DMA read it before, or it is the
		 * final iteration and this function already read all
		 * that was to read.
		 */
		if (fifo_bytes > 0) {
			fifo_bytes = read_rx_fifo(ep_state,
						  net_buf_tail(buf) + ep_state->last_packet_size,
						  fifo_bytes);
		}

		if (GET_BIT(rxs, USB_USB_RXS1_REG_USB_RX_LAST)) {
			if (!ep_state->iso &&
			    GET_BIT(rxs, USB_USB_RXS1_REG_USB_TOGGLE_RX) != ep_cfg->stat.data1) {
				/* Toggle bit does not match, discard packet */
				regs->rxc |= USB_USB_RXC1_REG_USB_FLUSH_Msk;
				ep_state->last_packet_size = 0;
				LOG_WRN("Packet with incorrect data1 field rejected");
				/* Re-enable reception */
				start_rx_packet(data, ep_state);
			} else {
				ep_cfg->stat.data1 ^= 1;
				/*
				 * NOTE(review): this writes EP0's TX toggle register
				 * while handling EPx OUT traffic — looks suspicious;
				 * confirm against the register map and start_rx_packet().
				 */
				REG_SET_VAL(USB_TXC0_REG, USB_TOGGLE_TX0, ep_cfg->stat.data1);
				net_buf_add(buf, ep_state->last_packet_size);

				/* Transfer complete when buffer is full, a short
				 * packet arrived, or the endpoint is isochronous.
				 */
				if (net_buf_tailroom(buf) == 0 ||
				    ep_state->last_packet_size < udc_mps_ep_size(ep_cfg) ||
				    ep_state->iso) {
					buf = udc_buf_get(data->dev, ep_cfg->addr);
					if (unlikely(buf == NULL)) {
						LOG_ERR("ep 0x%02x queue is empty", ep_cfg->addr);
						break;
					}
					ep_cfg->stat.busy = 0;
					udc_submit_ep_event(data->dev, buf, 0);
					break;
				}
				/* More data expected, re-arm reception */
				start_rx_packet(data, ep_state);
			}
		}
	} while (fifo_bytes > config->fifo_read_threshold);
}
1009
handle_rx_ev(struct usb_smartbond_data * data)1010 static void handle_rx_ev(struct usb_smartbond_data *data)
1011 {
1012 if (USB->USB_RXEV_REG & BIT(0)) {
1013 handle_epx_rx_ev(data, 1);
1014 }
1015
1016 if (USB->USB_RXEV_REG & BIT(1)) {
1017 handle_epx_rx_ev(data, 2);
1018 }
1019
1020 if (USB->USB_RXEV_REG & BIT(2)) {
1021 handle_epx_rx_ev(data, 3);
1022 }
1023 }
1024
/*
 * Service a TX event on a non-control IN endpoint.
 *
 * On TX_DONE with ACK, the transmitted bytes are consumed from the buffer
 * and the DATA0/1 toggle is flipped; a finished transfer is completed with
 * a ZLP when requested, or reported to the UDC stack and the next queued
 * transfer started. Otherwise the same packet is (re)transmitted.
 */
static void handle_epx_tx_ev(struct usb_smartbond_data *data, struct smartbond_ep_state *ep_state)
{
	uint32_t txs;
	const struct udc_smartbond_config *config = data->dev->config;
	struct smartbond_ep_reg_set *regs = ep_state->regs;
	struct udc_ep_config *const ep_cfg = &ep_state->config;
	struct net_buf *buf = ep_state->buf;
	const uint8_t ep = ep_cfg->addr;

	txs = regs->txs;

	if (GET_BIT(txs, USB_USB_TXS1_REG_USB_TX_DONE)) {
		if (data->dma_ep[1] == ep_state) {
			struct dma_status tx_dma_status;

			dma_get_status(config->dma_cfg.tx_dev, config->dma_cfg.tx_chan,
				       &tx_dma_status);
			/*
			 * Disable DMA and update last_packet_size with what
			 * DMA reported.
			 */
			dma_stop(config->dma_cfg.tx_dev, config->dma_cfg.tx_chan);
			/* +1: the DMA count is one less than the bytes actually sent */
			ep_state->last_packet_size = tx_dma_status.total_copied + 1;
			/* Release DMA for use by other endpoints. */
			data->dma_ep[1] = NULL;
		}

		if (GET_BIT(txs, USB_USB_TXS1_REG_USB_ACK_STAT)) {
			/* ACK received, update transfer state and DATA0/1 bit */
			net_buf_pull(buf, ep_state->last_packet_size);
			ep_state->last_packet_size = 0;
			ep_cfg->stat.data1 ^= 1;
			/*
			 * NOTE(review): this writes EP0's TX toggle register even
			 * though this handler services EPx — verify this is intended.
			 */
			REG_SET_VAL(USB_TXC0_REG, USB_TOGGLE_TX0, ep_cfg->stat.data1);

			if (buf->len == 0) {
				if (udc_ep_buf_has_zlp(buf)) {
					udc_ep_buf_clear_zlp(buf);
					/* Enable transmitter without putting anything in FIFO */
					regs->txc |= USB_USB_TXC1_REG_USB_TX_EN_Msk |
						     USB_USB_TXC1_REG_USB_LAST_Msk;
				} else {
					/* Whole transfer sent: hand buffer back to the
					 * stack and start the next queued one (if any).
					 */
					udc_ep_set_busy(data->dev, ep, false);
					buf = udc_buf_get(data->dev, ep);

					udc_submit_ep_event(data->dev, buf, 0);
					udc_smartbond_ep_tx(data->dev, ep);
				}
				return;
			}
		} else if (regs->epc_in & USB_USB_EPC1_REG_USB_STALL_Msk) {
			/*
			 * TX_DONE also indicates that STALL packet was just sent,
			 * there is no point to put anything into transmit FIFO.
			 * It could result in empty packet being scheduled.
			 */
			return;
		}
	}

	if (txs & USB_USB_TXS1_REG_USB_TX_URUN_Msk) {
		LOG_DBG("EP 0x%02x FIFO under-run\n", ep);
	}
	/* Start next or repeated packet. */
	start_tx_packet(data, ep_state);
}
1090
handle_tx_ev(struct usb_smartbond_data * data)1091 static void handle_tx_ev(struct usb_smartbond_data *data)
1092 {
1093 if (USB->USB_TXEV_REG & BIT(0)) {
1094 handle_epx_tx_ev(data, usb_dc_get_ep_in_state(data, 1));
1095 }
1096 if (USB->USB_TXEV_REG & BIT(1)) {
1097 handle_epx_tx_ev(data, usb_dc_get_ep_in_state(data, 2));
1098 }
1099 if (USB->USB_TXEV_REG & BIT(2)) {
1100 handle_epx_tx_ev(data, usb_dc_get_ep_in_state(data, 3));
1101 }
1102 }
1103
/* TX FIFO warning on an IN endpoint: top the hardware FIFO up again. */
static void handle_epx_tx_warn_ev(struct usb_smartbond_data *data, uint8_t ep_idx)
{
	struct smartbond_ep_state *in_state = usb_dc_get_ep_in_state(data, ep_idx);

	fill_tx_fifo(in_state);
}
1108
handle_fifo_warning(struct usb_smartbond_data * data)1109 static void handle_fifo_warning(struct usb_smartbond_data *data)
1110 {
1111 uint32_t fifo_warning = USB->USB_FWEV_REG;
1112
1113 if (fifo_warning & BIT(0)) {
1114 handle_epx_tx_warn_ev(data, 1);
1115 }
1116
1117 if (fifo_warning & BIT(1)) {
1118 handle_epx_tx_warn_ev(data, 2);
1119 }
1120
1121 if (fifo_warning & BIT(2)) {
1122 handle_epx_tx_warn_ev(data, 3);
1123 }
1124
1125 if (fifo_warning & BIT(4)) {
1126 handle_epx_rx_ev(data, 1);
1127 }
1128
1129 if (fifo_warning & BIT(5)) {
1130 handle_epx_rx_ev(data, 2);
1131 }
1132
1133 if (fifo_warning & BIT(6)) {
1134 handle_epx_rx_ev(data, 3);
1135 }
1136 }
1137
/*
 * Handle the EP0 NAK interrupt.
 *
 * While EP0 is stalled, NAK events are used to steer the STALL response
 * between the IN and OUT directions. When not stalled, an OUT NAK with RX
 * disabled indicates the host moved on, so pending TX work is scheduled.
 */
static void handle_ep0_nak(struct usb_smartbond_data *data)
{
	uint32_t ep0_nak = USB->USB_EP0_NAK_REG;

	if (REG_GET_BIT(USB_EPC0_REG, USB_STALL)) {
		if (GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_INNAK)) {
			/*
			 * EP0 is stalled and NAK was sent, it means that
			 * RX is enabled. Disable RX for now.
			 */
			REG_CLR_BIT(USB_RXC0_REG, USB_RX_EN);
			REG_SET_BIT(USB_TXC0_REG, USB_TX_EN);
		}

		if (GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_OUTNAK)) {
			/* OUT NAK while stalled: switch reception back on */
			REG_SET_BIT(USB_RXC0_REG, USB_RX_EN);
		}
	} else {
		/* Not stalled: the EP0 NAK interrupt is no longer needed */
		REG_CLR_BIT(USB_MAMSK_REG, USB_M_EP0_NAK);
		if (REG_GET_BIT(USB_RXC0_REG, USB_RX_EN) == 0 &&
		    GET_BIT(ep0_nak, USB_USB_EP0_NAK_REG_USB_EP0_OUTNAK)) {
			/* Discarded read — presumably clears the latched NAK status */
			(void)USB->USB_EP0_NAK_REG;
			k_work_submit_to_queue(udc_get_work_q(), &data->ep0_tx_work);
		}
	}
}
1164
empty_ep0_queues(const struct device * dev)1165 static void empty_ep0_queues(const struct device *dev)
1166 {
1167 struct net_buf *buf;
1168
1169 buf = udc_buf_get_all(dev, USB_CONTROL_EP_OUT);
1170 if (buf) {
1171 net_buf_unref(buf);
1172 }
1173 buf = udc_buf_get_all(dev, USB_CONTROL_EP_IN);
1174 if (buf) {
1175 net_buf_unref(buf);
1176 }
1177 }
1178
/*
 * Handle a USB bus reset.
 *
 * Puts the node in the RESET functional state, resets the device address,
 * clears per-endpoint software state, notifies the UDC stack and re-arms
 * the interrupt masks used to detect the end of the reset condition.
 */
static void handle_bus_reset(struct usb_smartbond_data *data)
{
	const struct udc_smartbond_config *config = data->dev->config;
	uint32_t alt_ev;

	USB->USB_NFSR_REG = 0;
	/* Address 0; bit 7 presumably the address-enable bit — see RM */
	USB->USB_FAR_REG = 0x80;
	USB->USB_ALTMSK_REG = 0;
	USB->USB_NFSR_REG = NFSR_NODE_RESET;
	USB->USB_TXMSK_REG = 0;
	USB->USB_RXMSK_REG = 0;
	set_nfsr(data, NFSR_NODE_RESET);

	/* NOTE(review): only IN endpoint state (index [1]) is cleared here;
	 * confirm whether OUT state (index [0]) should be reset as well.
	 */
	for (int i = 0; i < config->num_of_eps; ++i) {
		data->ep_state[1][i].buf = NULL;
		data->ep_state[1][i].config.stat.busy = 0;
	}

	LOG_INF("send USB_DC_RESET");
	udc_submit_event(data->dev, UDC_EVT_RESET, 0);
	USB->USB_DMA_CTRL_REG = 0;

	USB->USB_MAMSK_REG = USB_USB_MAMSK_REG_USB_M_INTR_Msk | USB_USB_MAMSK_REG_USB_M_FRAME_Msk |
			     USB_USB_MAMSK_REG_USB_M_WARN_Msk | USB_USB_MAMSK_REG_USB_M_ALT_Msk |
			     USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk |
			     USB_USB_MAMSK_REG_USB_M_EP0_TX_Msk;
	USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESUME_Msk;
	alt_ev = USB->USB_ALTEV_REG;
	check_reset_end(data, alt_ev);
	empty_ep0_queues(data->dev);
}
1210
usb_clock_on(struct usb_smartbond_data * data)1211 static void usb_clock_on(struct usb_smartbond_data *data)
1212 {
1213 if (atomic_cas(&data->clk_requested, 0, 1)) {
1214 clock_control_on(DEVICE_DT_GET(DT_NODELABEL(osc)),
1215 (clock_control_subsys_rate_t)SMARTBOND_CLK_USB);
1216 }
1217 }
1218
usb_clock_off(struct usb_smartbond_data * data)1219 static void usb_clock_off(struct usb_smartbond_data *data)
1220 {
1221 if (atomic_cas(&data->clk_requested, 1, 0)) {
1222 clock_control_off(DEVICE_DT_GET(DT_NODELABEL(osc)),
1223 (clock_control_subsys_rate_t)SMARTBOND_CLK_USB);
1224 }
1225 }
1226
/*
 * Handle ALT events: bus reset, resume and SD3 (suspend) detection.
 */
static void handle_alt_ev(struct usb_smartbond_data *data)
{
	const struct udc_smartbond_config *config = data->dev->config;
	struct smartbond_ep_state *ep_state;
	uint32_t alt_ev = USB->USB_ALTEV_REG;

	if (USB->USB_NFSR_REG == NFSR_NODE_SUSPEND) {
		/* Any ALT event while suspended needs the clock back first */
		usb_clock_on(data);
	}
	alt_ev = check_reset_end(data, alt_ev);
	if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESET) && data->nfsr != NFSR_NODE_RESET) {
		handle_bus_reset(data);
	} else if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_RESUME)) {
		if (USB->USB_NFSR_REG == NFSR_NODE_SUSPEND) {
			set_nfsr(data, NFSR_NODE_OPERATIONAL);
			if (data->ep_state[0][0].buf != NULL) {
				/* EP0 OUT transfer pending, re-enable its interrupt */
				USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk;
			}
			USB->USB_ALTMSK_REG = USB_USB_ALTMSK_REG_USB_M_RESET_Msk |
					      USB_USB_ALTMSK_REG_USB_M_SD3_Msk;
			/* Re-enable reception of endpoint with pending transfer */
			for (int ep_idx = 1; ep_idx < config->num_of_eps; ++ep_idx) {
				ep_state = usb_dc_get_ep_out_state(data, ep_idx);
				if (!ep_state->config.stat.halted) {
					start_rx_packet(data, ep_state);
				}
			}
			udc_submit_event(data->dev, UDC_EVT_RESUME, 0);
		}
	} else if (GET_BIT(alt_ev, USB_USB_ALTEV_REG_USB_SD3)) {
		/* SD3 (suspend detected): enter suspend and release the clock */
		set_nfsr(data, NFSR_NODE_SUSPEND);
		USB->USB_ALTMSK_REG =
			USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTMSK_REG_USB_M_RESUME_Msk;
		usb_clock_off(data);
		udc_submit_event(data->dev, UDC_EVT_SUSPEND, 0);
	}
}
1264
/*
 * Main USB interrupt service routine.
 *
 * Snapshots the masked main event register once and dispatches each pending
 * event class to its handler. The FRAME event doubles as a 1 ms tick used
 * to drive the reset-end and wake-up state machines.
 */
static void udc_smartbond_isr(const struct device *dev)
{
	struct usb_smartbond_data *data = udc_get_private(dev);
	uint32_t int_status = USB->USB_MAEV_REG & USB->USB_MAMSK_REG;

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_WARN)) {
		handle_fifo_warning(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_CH_EV)) {
		/* For now just clear interrupt */
		(void)USB->USB_CHARGER_STAT_REG;
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_TX)) {
		handle_ep0_tx(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_RX)) {
		handle_ep0_rx(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_EP0_NAK)) {
		handle_ep0_nak(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_RX_EV)) {
		handle_rx_ev(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_NAK)) {
		/* Discarded read — presumably acknowledges the NAK event */
		(void)USB->USB_NAKEV_REG;
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_FRAME)) {
		if (data->nfsr == NFSR_NODE_RESET) {
			/*
			 * During reset FRAME interrupt is enabled to periodically
			 * check when reset state ends.
			 * FRAME interrupt is generated every 1ms without host sending
			 * actual SOF.
			 */
			check_reset_end(data, USB_USB_ALTEV_REG_USB_RESET_Msk);
		} else if (data->nfsr == NFSR_NODE_WAKING) {
			/* No need to call set_nfsr, just set state */
			data->nfsr = NFSR_NODE_WAKING2;
		} else if (data->nfsr == NFSR_NODE_WAKING2) {
			/* No need to call set_nfsr, just set state */
			data->nfsr = NFSR_NODE_RESUME;
			LOG_DBG("data->nfsr = NFSR_NODE_RESUME %02x", USB->USB_MAMSK_REG);
		} else if (data->nfsr == NFSR_NODE_RESUME) {
			set_nfsr(data, NFSR_NODE_OPERATIONAL);
			if (data->ep_state[0][0].buf != NULL) {
				/* EP0 OUT transfer pending, re-enable its interrupt */
				USB->USB_MAMSK_REG |= USB_USB_MAMSK_REG_USB_M_EP0_RX_Msk;
			}
			LOG_DBG("Set operational %02x", USB->USB_MAMSK_REG);
		} else {
			/* Operational: the FRAME tick is no longer needed, mask it */
			USB->USB_MAMSK_REG &= ~USB_USB_MAMSK_REG_USB_M_FRAME_Msk;
		}
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_TX_EV)) {
		handle_tx_ev(data);
	}

	if (GET_BIT(int_status, USB_USB_MAEV_REG_USB_ALT)) {
		handle_alt_ev(data);
	}
}
1334
1335 /**
1336 * USB functionality can be disabled from HOST and DEVICE side.
1337 * Host side is indicated by VBUS line.
1338 * Device side is decided by pair of calls udc_enable()/udc_disable(),
1339 * USB will only work when application calls udc_enable() and VBUS is present.
1340 * When both conditions are not met USB clock (PLL) is released, and peripheral
1341 * remain in reset state.
1342 */
/*
 * Apply a change of the (attached, VBUS) state pair.
 *
 * The controller is fully powered up only when the application attached AND
 * VBUS is present; it is powered down when either condition drops after both
 * were active. Otherwise only the bookkeeping flags are updated.
 */
static void usb_change_state(struct usb_smartbond_data *data, bool attached, bool vbus_present)
{
	if (data->attached == attached && data->vbus_present == vbus_present) {
		/* Nothing changed */
		return;
	}

	if (vbus_present != data->vbus_present && attached) {
		/* Report VBUS transitions to the stack */
		udc_submit_event(data->dev,
				 vbus_present ? UDC_EVT_VBUS_READY : UDC_EVT_VBUS_REMOVED, 0);
	}
	if (attached && vbus_present) {
		data->attached = true;
		data->vbus_present = true;
		/*
		 * Prevent transition to standby, this greatly reduces
		 * IRQ response time
		 */
		pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
		usb_smartbond_dma_config(data);
		usb_clock_on(data);
		USB->USB_MCTRL_REG = USB_USB_MCTRL_REG_USBEN_Msk;
		USB->USB_NFSR_REG = 0;
		/* Address 0; bit 7 presumably the address-enable bit — see RM */
		USB->USB_FAR_REG = 0x80;
		USB->USB_TXMSK_REG = 0;
		USB->USB_RXMSK_REG = 0;

		USB->USB_MAMSK_REG = USB_USB_MAMSK_REG_USB_M_INTR_Msk |
				     USB_USB_MAMSK_REG_USB_M_ALT_Msk |
				     USB_USB_MAMSK_REG_USB_M_WARN_Msk;
		/*
		 * NOTE(review): the SD3 mask below uses an ALTEV macro in an
		 * ALTMSK write; verify the bit positions are identical.
		 */
		USB->USB_ALTMSK_REG =
			USB_USB_ALTMSK_REG_USB_M_RESET_Msk | USB_USB_ALTEV_REG_USB_SD3_Msk;

		/* USB_NAT is only changed while USBEN is set (see comment below) */
		USB->USB_MCTRL_REG = USB_USB_MCTRL_REG_USBEN_Msk | USB_USB_MCTRL_REG_USB_NAT_Msk;
	} else if (data->attached && data->vbus_present) {
		/*
		 * USB was previously in use; now either VBUS is gone or the
		 * application requested detach, so put it down.
		 */
		data->attached = attached;
		data->vbus_present = vbus_present;
		/*
		 * It's imperative that USB_NAT bit-field is updated with the
		 * USBEN bit-field being set. As such, zeroing the control
		 * register at once will result in leaving the USB transceivers
		 * in a floating state. Such an action, will induce incorrect
		 * behavior for subsequent charger detection operations and given
		 * that the device does not enter the sleep state (thus powering off
		 * PD_SYS and resetting the controller along with its transceivers).
		 */
		REG_CLR_BIT(USB_MCTRL_REG, USB_NAT);
		USB->USB_MCTRL_REG = 0;
		usb_clock_off(data);
		usb_smartbond_dma_deconfig(data);
		/* Allow standby USB not in use or not connected */
		pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
	} else {
		/* USB still not activated, keep track of what's on and off */
		data->attached = attached;
		data->vbus_present = vbus_present;
	}
}
1404
usb_dc_smartbond_vbus_isr(struct usb_smartbond_data * data)1405 static void usb_dc_smartbond_vbus_isr(struct usb_smartbond_data *data)
1406 {
1407 LOG_DBG("VBUS_ISR");
1408
1409 CRG_TOP->VBUS_IRQ_CLEAR_REG = 1;
1410 usb_change_state(data, data->attached,
1411 (CRG_TOP->ANA_STATUS_REG & CRG_TOP_ANA_STATUS_REG_VBUS_AVAILABLE_Msk) !=
1412 0);
1413 }
1414
usb_dc_smartbond_alloc_status_out(const struct device * dev)1415 static int usb_dc_smartbond_alloc_status_out(const struct device *dev)
1416 {
1417
1418 struct udc_ep_config *const ep_cfg = udc_get_ep_cfg(dev, USB_CONTROL_EP_OUT);
1419 struct net_buf *buf;
1420
1421 buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, 0);
1422 if (buf == NULL) {
1423 return -ENOMEM;
1424 }
1425
1426 k_fifo_put(&ep_cfg->fifo, buf);
1427
1428 return 0;
1429 }
1430
usbd_ctrl_feed_dout(const struct device * dev,const size_t length)1431 static int usbd_ctrl_feed_dout(const struct device *dev, const size_t length)
1432 {
1433 struct usb_smartbond_data *data = udc_get_private(dev);
1434 struct smartbond_ep_state *ep_state = EP0_OUT_STATE(data);
1435 struct udc_ep_config *const ep_cfg = &ep_state->config;
1436 struct net_buf *buf;
1437
1438 buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, length);
1439 if (buf == NULL) {
1440 return -ENOMEM;
1441 }
1442
1443 k_fifo_put(&ep_cfg->fifo, buf);
1444 ep_state->buf = buf;
1445 start_rx_packet(data, ep_state);
1446
1447 return 0;
1448 }
1449
/*
 * Work queue handler finishing an EP0 OUT (data or status) stage.
 *
 * Dequeues the completed buffer under an IRQ lock and advances the control
 * transfer state machine, submitting status to the stack where required.
 */
static void handle_ep0_rx_work(struct k_work *item)
{
	struct usb_smartbond_data *data =
		CONTAINER_OF(item, struct usb_smartbond_data, ep0_rx_work);
	const uint8_t ep = USB_CONTROL_EP_OUT;
	struct net_buf *buf;
	const struct device *dev = data->dev;
	unsigned int lock_key;

	/*
	 * Lock needed here because busy is a bit field and access
	 * may result in wrong state of data1 field
	 */
	lock_key = irq_lock();

	udc_ep_set_busy(dev, ep, false);
	buf = udc_buf_get(dev, ep);

	irq_unlock(lock_key);
	if (unlikely(buf == NULL)) {
		LOG_ERR("ep 0x%02x queue is empty", ep);
		return;
	}
	/* Update packet size */
	if (udc_ctrl_stage_is_status_out(dev)) {
		/* Status stage done: advance stage and report completion */
		udc_ctrl_update_stage(dev, buf);
		udc_ctrl_submit_status(dev, buf);
	} else {
		udc_ctrl_update_stage(dev, buf);
	}

	if (udc_ctrl_stage_is_status_in(dev)) {
		/* Data OUT finished, proceed with the status IN stage */
		udc_ctrl_submit_s_out_status(dev, buf);
	}
}
1485
/*
 * Work queue handler finishing an EP0 IN (data or status) stage.
 *
 * Removes the transmitted buffer from the queue under an IRQ lock, reports
 * status completion where appropriate, and arms reception of the status OUT
 * packet when the host is expected to conclude the transfer.
 */
static void handle_ep0_tx_work(struct k_work *item)
{
	struct usb_smartbond_data *data =
		CONTAINER_OF(item, struct usb_smartbond_data, ep0_tx_work);
	struct net_buf *buf;
	const struct device *dev = data->dev;
	const uint8_t ep = USB_CONTROL_EP_IN;
	unsigned int lock_key;

	buf = udc_buf_peek(dev, ep);
	__ASSERT(buf == EP0_IN_STATE(data)->buf, "TX work without buffer %p %p", buf,
		 EP0_IN_STATE(data)->buf);

	/*
	 * Lock needed here because busy is a bit field and access
	 * may result in wrong state of data1 field
	 */
	lock_key = irq_lock();

	udc_ep_set_busy(dev, ep, false);

	/* Remove buffer from queue */
	buf = udc_buf_get(dev, ep);

	irq_unlock(lock_key);

	__ASSERT(buf == EP0_IN_STATE(data)->buf, "Internal error");

	/* For control endpoint get ready for ACK stage
	 * from host.
	 */
	if (udc_ctrl_stage_is_status_in(dev) || udc_ctrl_stage_is_no_data(dev)) {
		/* Status stage finished, notify upper layer */
		udc_ctrl_submit_status(dev, buf);
	}

	/* Update to next stage of control transfer */
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_status_out(dev)) {
		/*
		 * Flush TX FIFO in case the host already sent the status OUT
		 * packet and is not interested in reading from the IN endpoint
		 */
		USB->USB_TXC0_REG = USB_USB_TXC0_REG_USB_FLUSH_Msk;
		/* Enable reception of status OUT packet */
		REG_SET_BIT(USB_RXC0_REG, USB_RX_EN);
		/* IN transfer finished, release the buffer */
		net_buf_unref(buf);
	}
}
1539
/*
 * Work queue handler processing a SETUP packet captured by the ISR.
 *
 * Wraps the 8-byte setup data stashed in data->setup_buffer into a net_buf,
 * advances the control transfer state machine, and prepares the next stage:
 * feed a data OUT buffer, pre-allocate the status OUT buffer, or submit an
 * immediate status response.
 */
static void handle_ep0_setup_work(struct k_work *item)
{
	struct usb_smartbond_data *data =
		CONTAINER_OF(item, struct usb_smartbond_data, ep0_setup_work);
	struct net_buf *buf;
	int err;
	const struct device *dev = data->dev;
	struct smartbond_ep_state *ep0_out_state;

	buf = udc_ctrl_alloc(dev, USB_CONTROL_EP_OUT, sizeof(struct usb_setup_packet));
	if (buf == NULL) {
		LOG_ERR("Failed to allocate for setup");
		return;
	}

	udc_ep_buf_set_setup(buf);
	/* Setup packets are always 8 bytes long */
	net_buf_add_mem(buf, data->setup_buffer, 8);
	ep0_out_state = EP0_OUT_STATE(data);
	/* Reset EP0 OUT bookkeeping for the new control transfer */
	ep0_out_state->last_packet_size = 0;
	ep0_out_state->buf = NULL;
	udc_ctrl_update_stage(dev, buf);

	if (udc_ctrl_stage_is_data_out(dev)) {
		/* Allocate and feed buffer for data OUT stage */
		LOG_DBG("s:%p|feed for -out-", buf);
		err = usbd_ctrl_feed_dout(dev, udc_data_stage_length(buf));
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		}
	} else if (udc_ctrl_stage_is_data_in(dev)) {
		/* Allocate buffer for Status OUT stage */
		err = usb_dc_smartbond_alloc_status_out(dev);
		if (err == -ENOMEM) {
			err = udc_submit_ep_event(dev, buf, err);
		} else {
			err = udc_ctrl_submit_s_in_status(dev);
			if (err == -ENOMEM) {
				err = udc_submit_ep_event(dev, buf, err);
			}
		}
	} else {
		/* No data stage, submit status right away */
		err = udc_ctrl_submit_s_status(dev);
	}
}
1584
udc_smartbond_enable(const struct device * dev)1585 static int udc_smartbond_enable(const struct device *dev)
1586 {
1587 struct usb_smartbond_data *data = udc_get_private(dev);
1588 const struct udc_smartbond_config *config = dev->config;
1589
1590 LOG_DBG("Enable UDC");
1591
1592 usb_change_state(data, true, data->vbus_present);
1593
1594 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_OUT, USB_EP_TYPE_CONTROL, 8, 0)) {
1595 LOG_ERR("Failed to enable control endpoint");
1596 return -EIO;
1597 }
1598
1599 if (udc_ep_enable_internal(dev, USB_CONTROL_EP_IN, USB_EP_TYPE_CONTROL, 8, 0)) {
1600 LOG_ERR("Failed to enable control endpoint");
1601 return -EIO;
1602 }
1603
1604 irq_enable(config->udc_irq);
1605
1606 return 0;
1607 }
1608
udc_smartbond_disable(const struct device * dev)1609 static int udc_smartbond_disable(const struct device *dev)
1610 {
1611 struct usb_smartbond_data *data = udc_get_private(dev);
1612 const struct udc_smartbond_config *config = dev->config;
1613
1614 LOG_DBG("Disable UDC");
1615
1616 usb_change_state(data, false, data->vbus_present);
1617
1618 irq_disable(config->udc_irq);
1619
1620 return 0;
1621 }
1622
1623 /*
1624 * Prepare and configure most of the parts, if the controller has a way
1625 * of detecting VBUS activity it should be enabled here.
1626 * Only udc_smartbond_enable() makes device visible to the host.
1627 */
udc_smartbond_init(const struct device * dev)1628 static int udc_smartbond_init(const struct device *dev)
1629 {
1630 struct usb_smartbond_data *data = udc_get_private(dev);
1631 struct udc_data *udc_data = &data->udc_data;
1632 const struct udc_smartbond_config *config = dev->config;
1633 struct smartbond_ep_reg_set *reg_set = (struct smartbond_ep_reg_set *)&(USB->USB_EPC0_REG);
1634 const uint16_t mps = 1023;
1635 int err;
1636
1637 data->dev = dev;
1638
1639 k_mutex_init(&udc_data->mutex);
1640 k_work_init(&data->ep0_setup_work, handle_ep0_setup_work);
1641 k_work_init(&data->ep0_rx_work, handle_ep0_rx_work);
1642 k_work_init(&data->ep0_tx_work, handle_ep0_tx_work);
1643
1644 udc_data->caps.rwup = true;
1645 udc_data->caps.mps0 = UDC_MPS0_8;
1646
1647 for (int i = 0; i < config->num_of_eps; i++) {
1648 data->ep_state[0][i].config.caps.out = 1;
1649 if (i == 0) {
1650 data->ep_state[0][i].config.caps.control = 1;
1651 data->ep_state[0][i].config.caps.mps = 8;
1652 } else {
1653 data->ep_state[0][i].config.caps.bulk = 1;
1654 data->ep_state[0][i].config.caps.interrupt = 1;
1655 data->ep_state[0][i].config.caps.iso = 1;
1656 data->ep_state[0][i].config.caps.mps = mps;
1657 }
1658 data->ep_state[0][i].config.addr = USB_EP_DIR_OUT | i;
1659 err = udc_register_ep(dev, &data->ep_state[0][i].config);
1660 if (err != 0) {
1661 LOG_ERR("Failed to register endpoint");
1662 return err;
1663 }
1664 data->ep_state[0][i].regs = reg_set + i;
1665 }
1666
1667 for (int i = 0; i < config->num_of_eps; i++) {
1668 data->ep_state[1][i].config.caps.in = 1;
1669 if (i == 0) {
1670 data->ep_state[1][i].config.caps.control = 1;
1671 data->ep_state[1][i].config.caps.mps = 8;
1672 } else {
1673 data->ep_state[1][i].config.caps.bulk = 1;
1674 data->ep_state[1][i].config.caps.interrupt = 1;
1675 data->ep_state[1][i].config.caps.iso = 1;
1676 data->ep_state[1][i].config.caps.mps = mps;
1677 }
1678
1679 data->ep_state[1][i].config.addr = USB_EP_DIR_IN | i;
1680 err = udc_register_ep(dev, &data->ep_state[1][i].config);
1681 if (err != 0) {
1682 LOG_ERR("Failed to register endpoint");
1683 return err;
1684 }
1685 data->ep_state[1][i].regs = reg_set + i;
1686 }
1687
1688 CRG_TOP->VBUS_IRQ_CLEAR_REG = 1;
1689 /* Both connect and disconnect needs to be handled */
1690 CRG_TOP->VBUS_IRQ_MASK_REG = CRG_TOP_VBUS_IRQ_MASK_REG_VBUS_IRQ_EN_FALL_Msk |
1691 CRG_TOP_VBUS_IRQ_MASK_REG_VBUS_IRQ_EN_RISE_Msk;
1692 NVIC_SetPendingIRQ(config->vbus_irq);
1693 irq_enable(config->vbus_irq);
1694
1695 return 0;
1696 }
1697
udc_smartbond_lock(const struct device * dev)1698 static int udc_smartbond_lock(const struct device *dev)
1699 {
1700 return udc_lock_internal(dev, K_FOREVER);
1701 }
1702
/* Release the UDC mutex taken by udc_smartbond_lock(). */
static int udc_smartbond_unlock(const struct device *dev)
{
	return udc_unlock_internal(dev);
}
1707
/* UDC driver API vtable registered with the Zephyr UDC core. */
static const struct udc_api udc_smartbond_api = {
	.lock = udc_smartbond_lock,
	.unlock = udc_smartbond_unlock,
	.device_speed = udc_smartbond_device_speed,
	.init = udc_smartbond_init,
	.enable = udc_smartbond_enable,
	.disable = udc_smartbond_disable,
	.shutdown = udc_smartbond_shutdown,
	.set_address = udc_smartbond_set_address,
	.host_wakeup = udc_smartbond_host_wakeup,
	.ep_enable = udc_smartbond_ep_enable,
	.ep_disable = udc_smartbond_ep_disable,
	.ep_set_halt = udc_smartbond_ep_set_halt,
	.ep_clear_halt = udc_smartbond_ep_clear_halt,
	.ep_enqueue = udc_smartbond_ep_enqueue,
	.ep_dequeue = udc_smartbond_ep_dequeue,
};
1725
#define DT_DRV_COMPAT renesas_smartbond_usbd

/* Devicetree IRQ accessors: index 0 is the USB device interrupt,
 * index 1 the VBUS change interrupt.
 */
#define UDC_IRQ(inst) DT_INST_IRQ_BY_IDX(inst, 0, irq)
#define UDC_IRQ_PRI(inst) DT_INST_IRQ_BY_IDX(inst, 0, priority)
#define VBUS_IRQ(inst) DT_INST_IRQ_BY_IDX(inst, 1, irq)
#define VBUS_IRQ_PRI(inst) DT_INST_IRQ_BY_IDX(inst, 1, priority)

/*
 * Minimal transfer size needed to use DMA. For short transfers
 * it may be simpler to just fill hardware FIFO with data instead
 * of programming DMA registers.
 */
#define DMA_MIN_TRANSFER_SIZE(inst) DT_INST_PROP(inst, dma_min_transfer_size)
#define FIFO_READ_THRESHOLD(inst) DT_INST_PROP(inst, fifo_read_threshold)
1740
1741 #define UDC_SMARTBOND_DEVICE_DEFINE(n) \
1742 \
1743 static const struct udc_smartbond_config udc_smartbond_cfg_##n = { \
1744 .udc_irq = UDC_IRQ(n), \
1745 .vbus_irq = VBUS_IRQ(n), \
1746 .dma_min_transfer_size = DMA_MIN_TRANSFER_SIZE(n), \
1747 .fifo_read_threshold = FIFO_READ_THRESHOLD(n), \
1748 .fifo_read_threshold = FIFO_READ_THRESHOLD(n), \
1749 .num_of_eps = DT_INST_PROP(n, num_bidir_endpoints), \
1750 .dma_cfg = { \
1751 .tx_chan = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \
1752 .tx_slot_mux = DT_INST_DMAS_CELL_BY_NAME(n, tx, config), \
1753 .tx_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \
1754 .rx_chan = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \
1755 .rx_slot_mux = DT_INST_DMAS_CELL_BY_NAME(n, rx, config), \
1756 .rx_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \
1757 }, \
1758 }; \
1759 \
1760 static struct usb_smartbond_data udc_data_##n = { \
1761 .udc_data = { \
1762 .mutex = Z_MUTEX_INITIALIZER(udc_data_##n.udc_data.mutex), \
1763 .priv = &udc_data_##n, \
1764 }, \
1765 }; \
1766 \
1767 static int udc_smartbond_driver_preinit_##n(const struct device *dev) \
1768 { \
1769 IRQ_CONNECT(VBUS_IRQ(n), VBUS_IRQ_PRI(n), usb_dc_smartbond_vbus_isr, \
1770 &udc_data_##n, 0); \
1771 IRQ_CONNECT(UDC_IRQ(n), UDC_IRQ_PRI(n), udc_smartbond_isr, DEVICE_DT_INST_GET(n), \
1772 0); \
1773 return 0; \
1774 } \
1775 \
1776 DEVICE_DT_INST_DEFINE(n, udc_smartbond_driver_preinit_##n, NULL, &udc_data_##n, \
1777 &udc_smartbond_cfg_##n, POST_KERNEL, \
1778 CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &udc_smartbond_api);
1779
1780 DT_INST_FOREACH_STATUS_OKAY(UDC_SMARTBOND_DEVICE_DEFINE)
1781