1 /*
2  * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <stdint.h>
8 #include <string.h>
9 #include <sys/queue.h>
10 #include "freertos/FreeRTOS.h"
11 #include "freertos/task.h"
12 #include "freertos/semphr.h"
13 #include "esp_heap_caps.h"
14 #include "esp_intr_alloc.h"
15 #include "esp_timer.h"
16 #include "esp_err.h"
17 #include "esp_rom_gpio.h"
18 #include "hal/usbh_hal.h"
19 #include "hal/usb_types_private.h"
20 #include "soc/gpio_pins.h"
21 #include "soc/gpio_sig_map.h"
22 #include "driver/periph_ctrl.h"
23 #include "hcd.h"
24 #include "usb_private.h"
25 #include "usb/usb_types_ch9.h"
26 
27 // ----------------------------------------------------- Macros --------------------------------------------------------
28 
29 // --------------------- Constants -------------------------
30 
31 #define INIT_DELAY_MS                           30  //A delay of at least 25ms to enter Host mode. Make it 30ms to be safe
32 #define DEBOUNCE_DELAY_MS                       250 //A debounce delay of 250ms
33 #define RESET_HOLD_MS                           30  //Spec requires at least 10ms. Make it 30ms to be safe
34 #define RESET_RECOVERY_MS                       30  //Reset recovery delay of 10ms (make it 30 ms to be safe) to allow for connected device to recover (and for port enabled interrupt to occur)
35 #define RESUME_HOLD_MS                          30  //Spec requires at least 20ms, Make it 30ms to be safe
36 #define RESUME_RECOVERY_MS                      20  //Resume recovery of at least 10ms. Make it 20 ms to be safe. This will include the 3 LS bit times of the EOP
37 
38 #define CTRL_EP_MAX_MPS_LS                      8   //Largest Maximum Packet Size for Low Speed control endpoints
39 #define CTRL_EP_MAX_MPS_FS                      64  //Largest Maximum Packet Size for Full Speed control endpoints
40 
41 #define NUM_PORTS                               1   //The controller only has one port.
42 
43 // ----------------------- Configs -------------------------
44 
45 typedef struct {
46     int in_mps;
47     int non_periodic_out_mps;
48     int periodic_out_mps;
49 } fifo_mps_limits_t;
50 
51 /**
52  * @brief Default FIFO sizes (see 2.1.2.4 for programming guide)
53  *
54  * RXFIFO
55  * - Recommended: ((LPS/4) * 2) + 2
56  * - Actual: Whatever leftover size: USBH_HAL_FIFO_TOTAL_USABLE_LINES(200) - 48 - 48 = 104
57  * - Worst case can accommodate two packets of 204 bytes, or one packet of 408
58  * NPTXFIFO
59  * - Recommended: (LPS/4) * 2
60  * - Actual: Assume LPS is 64, and 3 packets: (64/4) * 3 = 48
61  * - Worst case can accommodate three packets of 64 bytes or one packet of 192
62  * PTXFIFO
63  * - Recommended: (LPS/4) * 2
64  * - Actual: Assume LPS is 64, and 3 packets: (64/4) * 3 = 48
65  * - Worst case can accommodate three packets of 64 bytes or one packet of 192
66  */
67 const usbh_hal_fifo_config_t fifo_config_default = {
68     .rx_fifo_lines = 104,
69     .nptx_fifo_lines = 48,
70     .ptx_fifo_lines = 48,
71 };
72 
73 const fifo_mps_limits_t mps_limits_default = {
74     .in_mps = 408,
75     .non_periodic_out_mps = 192,
76     .periodic_out_mps = 192,
77 };
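
/*
 * Worked example of the arithmetic behind the default configuration above (a
 * sketch only; the numbers restate the comments and limits in this file):
 *
 *   nptx_fifo_lines = (64 / 4) * 3   = 48    //Three 64-byte packets
 *   ptx_fifo_lines  = (64 / 4) * 3   = 48    //Three 64-byte packets
 *   rx_fifo_lines   = 200 - 48 - 48  = 104   //RX FIFO takes the leftover lines
 *
 * Each FIFO line is one 32-bit word, so the TX limits are 48 * 4 = 192 bytes.
 * For the RX FIFO, subtracting the 2 lines of overhead from the recommended
 * formula gives (104 - 2) * 4 = 408 bytes, which is where in_mps comes from.
 */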
78 
79 /**
80  * @brief FIFO sizes that bias to giving RX FIFO more capacity
81  *
82  * RXFIFO
83  * - Recommended: ((LPS/4) * 2) + 2
84  * - Actual: Whatever leftover size: USBH_HAL_FIFO_TOTAL_USABLE_LINES(200) - 32 - 16 = 152
85  * - Worst case can accommodate two packets of 300 bytes or one packet of 600 bytes
86  * NPTXFIFO
87  * - Recommended: (LPS/4) * 2
88  * - Actual: Assume LPS is 64, and 1 packet: (64/4) * 1 = 16
89  * - Worst case can accommodate one packet of 64 bytes
90  * PTXFIFO
91  * - Recommended: (LPS/4) * 2
92  * - Actual: Assume LPS is 64, and 2 packets: (64/4) * 2 = 32
93  * - Worst case can accommodate two packets of 64 bytes or one packet of 128
94  */
95 const usbh_hal_fifo_config_t fifo_config_bias_rx = {
96     .rx_fifo_lines = 152,
97     .nptx_fifo_lines = 16,
98     .ptx_fifo_lines = 32,
99 };
100 
101 const fifo_mps_limits_t mps_limits_bias_rx = {
102     .in_mps = 600,
103     .non_periodic_out_mps = 64,
104     .periodic_out_mps = 128,
105 };
106 
107 /**
108  * @brief FIFO sizes that bias to giving Periodic TX FIFO more capacity (i.e., ISOC OUT)
109  *
110  * RXFIFO
111  * - Recommended: ((LPS/4) * 2) + 2
112  * - Actual: Assume LPS is 64, and 2 packets: ((64/4) * 2) + 2 = 34
113  * - Worst case can accommodate two packets of 64 bytes or one packet of 128
114  * NPTXFIFO
115  * - Recommended: (LPS/4) * 2
116  * - Actual: Assume LPS is 64, and 1 packet: (64/4) * 1 = 16
117  * - Worst case can accommodate one packet of 64 bytes
118  * PTXFIFO
119  * - Recommended: (LPS/4) * 2
120  * - Actual: Whatever leftover size: USBH_HAL_FIFO_TOTAL_USABLE_LINES(200) - 34 - 16 = 150
121  * - Worst case can accommodate two packets of 300 bytes or one packet of 600 bytes
122  */
123 const usbh_hal_fifo_config_t fifo_config_bias_ptx = {
124     .rx_fifo_lines = 34,
125     .nptx_fifo_lines = 16,
126     .ptx_fifo_lines = 150,
127 };
128 
129 const fifo_mps_limits_t mps_limits_bias_ptx = {
130     .in_mps = 128,
131     .non_periodic_out_mps = 64,
132     .periodic_out_mps = 600,
133 };
134 
135 #define FRAME_LIST_LEN                          USB_HAL_FRAME_LIST_LEN_32
136 #define NUM_BUFFERS                             2
137 
138 #define XFER_LIST_LEN_CTRL                      3   //One descriptor for each stage
139 #define XFER_LIST_LEN_BULK                      2   //One descriptor for transfer, one to support an extra zero length packet
140 #define XFER_LIST_LEN_INTR                      32
141 #define XFER_LIST_LEN_ISOC                      FRAME_LIST_LEN  //Same length as the frame list makes it easier to schedule. Must be power of 2
142 
143 // ------------------------ Flags --------------------------
144 
145 /**
146  * @brief Bit masks for the HCD to use in the URBs reserved_flags field
147  *
148  * The URB object has a reserved_flags member for the host stack's internal use. The following flags are set in
149  * reserved_flags in order to keep track of the state of an URB within the HCD.
150  */
151 #define URB_HCD_STATE_IDLE                      0   //The URB is not enqueued in an HCD pipe
152 #define URB_HCD_STATE_PENDING                   1   //The URB is enqueued and pending execution
153 #define URB_HCD_STATE_INFLIGHT                  2   //The URB is currently in flight
154 #define URB_HCD_STATE_DONE                      3   //The URB has completed execution or is retired, and is waiting to be dequeued
155 #define URB_HCD_STATE_MASK                      0x3 //Bit mask covering the state values above
156 #define URB_HCD_STATE_SET(reserved_flags, state)    (reserved_flags = (reserved_flags & ~URB_HCD_STATE_MASK) | state)
157 #define URB_HCD_STATE_GET(reserved_flags)           (reserved_flags & URB_HCD_STATE_MASK)
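
/*
 * Illustrative use of the state macros above (a sketch; the actual call sites
 * are in the URB enqueue/dequeue paths later in this file, and reserved_flags
 * is assumed to be the URB member described in the comment above):
 *
 *   URB_HCD_STATE_SET(urb->reserved_flags, URB_HCD_STATE_PENDING);  //Mark as enqueued
 *   ...
 *   if (URB_HCD_STATE_GET(urb->reserved_flags) == URB_HCD_STATE_DONE) {
 *       //The URB can now be dequeued by the caller
 *   }
 */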
158 
159 // -------------------- Convenience ------------------------
160 
161 #define HCD_ENTER_CRITICAL_ISR()                portENTER_CRITICAL_ISR(&hcd_lock)
162 #define HCD_EXIT_CRITICAL_ISR()                 portEXIT_CRITICAL_ISR(&hcd_lock)
163 #define HCD_ENTER_CRITICAL()                    portENTER_CRITICAL(&hcd_lock)
164 #define HCD_EXIT_CRITICAL()                     portEXIT_CRITICAL(&hcd_lock)
165 
166 #define HCD_CHECK(cond, ret_val) ({                                         \
167             if (!(cond)) {                                                  \
168                 return (ret_val);                                           \
169             }                                                               \
170 })
171 #define HCD_CHECK_FROM_CRIT(cond, ret_val) ({                               \
172             if (!(cond)) {                                                  \
173                 HCD_EXIT_CRITICAL();                                        \
174                 return ret_val;                                             \
175             }                                                               \
176 })
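
/*
 * Usage note (sketch): HCD_CHECK() is meant for argument/state validation
 * outside of a critical section, whereas HCD_CHECK_FROM_CRIT() must only be
 * used while the HCD spinlock is held, since it exits the critical section
 * before returning. The public APIs below follow this pattern:
 *
 *   HCD_CHECK(speed != NULL, ESP_ERR_INVALID_ARG);
 *   HCD_ENTER_CRITICAL();
 *   HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL, ESP_ERR_INVALID_STATE);
 *   //...critical section work...
 *   HCD_EXIT_CRITICAL();
 */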
177 
178 // ------------------------------------------------------ Types --------------------------------------------------------
179 
180 typedef struct pipe_obj pipe_t;
181 typedef struct port_obj port_t;
182 
183 /**
184  * @brief Object representing a single buffer of a pipe's multi buffer implementation
185  */
186 typedef struct {
187     void *xfer_desc_list;
188     urb_t *urb;
189     union {
190         struct {
191             uint32_t data_stg_in: 1;        //Data stage of the control transfer is IN
192             uint32_t data_stg_skip: 1;      //Control transfer has no data stage
193             uint32_t cur_stg: 2;            //Index of the current stage (e.g., 0 is setup stage, 2 is status stage)
194             uint32_t reserved28: 28;
195         } ctrl;                             //Control transfer related
196         struct {
197             uint32_t zero_len_packet: 1;    //Added a zero length packet, so transfer consists of 2 QTDs
198             uint32_t reserved31: 31;
199         } bulk;                             //Bulk transfer related
200         struct {
201             uint32_t num_qtds: 8;           //Number of transfer descriptors filled (excluding zero length packet)
202             uint32_t zero_len_packet: 1;    //Added a zero length packet, so true number descriptors is num_qtds + 1
203             uint32_t reserved23: 23;
204         } intr;                             //Interrupt transfer related
205         struct {
206             uint32_t num_qtds: 8;           //Number of transfer descriptors filled (including NULL descriptors)
207             uint32_t interval: 8;           //Interval (in number of SOF i.e., ms)
208             uint32_t start_idx: 8;          //Index of the first transfer descriptor in the list
209             uint32_t next_start_idx: 8;     //Index for the first descriptor of the next buffer
210         } isoc;
211         uint32_t val;
212     } flags;
213     union {
214         struct {
215             uint32_t executing: 1;          //The buffer is currently executing
216             uint32_t was_canceled: 1;      //Buffer was done due to a cancellation (i.e., a halt request)
217             uint32_t reserved6: 6;
218             uint32_t stop_idx: 8;           //The descriptor index when the channel was halted
219             hcd_pipe_event_t pipe_event: 8; //The pipe event when the buffer was done
220             uint32_t reserved8: 8;
221         };
222         uint32_t val;
223     } status_flags;                         //Status flags for the buffer
224 } dma_buffer_block_t;
225 
226 /**
227  * @brief Object representing a pipe in the HCD layer
228  */
229 struct pipe_obj {
230     //URB queueing related
231     TAILQ_HEAD(tailhead_urb_pending, urb_s) pending_urb_tailq;
232     TAILQ_HEAD(tailhead_urb_done, urb_s) done_urb_tailq;
233     int num_urb_pending;
234     int num_urb_done;
235     //Multi-buffer control
236     dma_buffer_block_t *buffers[NUM_BUFFERS];  //Double buffering scheme
237     union {
238         struct {
239             uint32_t buffer_num_to_fill: 2; //Number of buffers that can be filled
240             uint32_t buffer_num_to_exec: 2; //Number of buffers that are filled and need to be executed
241             uint32_t buffer_num_to_parse: 2;//Number of buffers completed execution and waiting to be parsed
242             uint32_t reserved2: 2;
243             uint32_t wr_idx: 1;             //Index of the next buffer to fill. Bit width must allow NUM_BUFFERS to wrap automatically
244             uint32_t rd_idx: 1;             //Index of the current buffer in-flight. Bit width must allow NUM_BUFFERS to wrap automatically
245             uint32_t fr_idx: 1;             //Index of the next buffer to parse. Bit width must allow NUM_BUFFERS to wrap automatically
246             uint32_t buffer_is_executing: 1;//One of the buffers is in flight
247             uint32_t reserved20: 20;
248         };
249         uint32_t val;
250     } multi_buffer_control;
251     //HAL related
252     usbh_hal_chan_t *chan_obj;
253     usbh_hal_ep_char_t ep_char;
254     //Port related
255     port_t *port;                           //The port to which this pipe is routed through
256     TAILQ_ENTRY(pipe_obj) tailq_entry;      //TailQ entry for port's list of pipes
257     //Pipe status/state/events related
258     hcd_pipe_state_t state;
259     hcd_pipe_event_t last_event;
260     volatile TaskHandle_t task_waiting_pipe_notif;  //Task handle used for internal pipe events. Set by waiter, cleared by notifier
261     union {
262         struct {
263             uint32_t waiting_halt: 1;
264             uint32_t pipe_cmd_processing: 1;
265             uint32_t has_urb: 1;            //Indicates there is at least one URB either pending, inflight, or done
266             uint32_t persist: 1;            //indicates that this pipe should persist through a run-time port reset
267             uint32_t reset_lock: 1;         //Indicates that this pipe is undergoing a run-time reset
268             uint32_t reserved27: 27;
269         };
270         uint32_t val;
271     } cs_flags;
272     //Pipe callback and context
273     hcd_pipe_callback_t callback;
274     void *callback_arg;
275     void *context;
276 };
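
/*
 * Note on the multi-buffer bookkeeping above: with NUM_BUFFERS equal to 2, the
 * 1-bit wr_idx/rd_idx/fr_idx fields wrap from 1 back to 0 on increment, so the
 * fill, execute, and parse positions can be advanced without an explicit
 * modulo. A minimal sketch of how a buffer is claimed for filling:
 *
 *   dma_buffer_block_t *buf = pipe->buffers[pipe->multi_buffer_control.wr_idx];
 *   //...fill buf from the next pending URB...
 *   pipe->multi_buffer_control.wr_idx++;              //Wraps automatically
 *   pipe->multi_buffer_control.buffer_num_to_fill--;
 *   pipe->multi_buffer_control.buffer_num_to_exec++;
 */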
277 
278 /**
279  * @brief Object representing a port in the HCD layer
280  */
281 struct port_obj {
282     usbh_hal_context_t *hal;
283     void *frame_list;
284     //Pipes routed through this port
285     TAILQ_HEAD(tailhead_pipes_idle, pipe_obj) pipes_idle_tailq;
286     TAILQ_HEAD(tailhead_pipes_queued, pipe_obj) pipes_active_tailq;
287     int num_pipes_idle;
288     int num_pipes_queued;
289     //Port status, state, and events
290     hcd_port_state_t state;
291     usb_speed_t speed;
292     hcd_port_event_t last_event;
293     volatile TaskHandle_t task_waiting_port_notif;  //Task handle used for internal port events. Set by waiter, cleared by notifier
294     union {
295         struct {
296             uint32_t event_pending: 1;              //The port has an event that needs to be handled
297             uint32_t event_processing: 1;           //The port is currently processing (handling) an event
298             uint32_t cmd_processing: 1;             //Used to indicate command handling is ongoing
299             uint32_t disable_requested: 1;
300             uint32_t conn_dev_ena: 1;               //Used to indicate the port is connected to a device that has been reset
301             uint32_t periodic_scheduling_enabled: 1;
302             uint32_t reserved26: 26;
303         };
304         uint32_t val;
305     } flags;
306     bool initialized;
307     //FIFO biasing related
308     const usbh_hal_fifo_config_t *fifo_config;
309     const fifo_mps_limits_t *fifo_mps_limits;
310     //Port callback and context
311     hcd_port_callback_t callback;
312     void *callback_arg;
313     SemaphoreHandle_t port_mux;
314     void *context;
315 };
316 
317 /**
318  * @brief Object representing the HCD
319  */
320 typedef struct {
321     //Ports (Hardware only has one)
322     port_t *port_obj;
323     intr_handle_t isr_hdl;
324 } hcd_obj_t;
325 
326 static portMUX_TYPE hcd_lock = portMUX_INITIALIZER_UNLOCKED;
327 static hcd_obj_t *s_hcd_obj = NULL;     //Note: "s_" is for the static pointer
328 
329 // ------------------------------------------------- Forward Declare ---------------------------------------------------
330 
331 // ------------------- Buffer Control ----------------------
332 
333 /**
334  * @brief Check if an inactive buffer can be filled with a pending URB
335  *
336  * @param pipe Pipe object
337  * @return true There are one or more pending URBs, and the inactive buffer is yet to be filled
338  * @return false Otherwise
339  */
340 static inline bool _buffer_can_fill(pipe_t *pipe)
341 {
342     //We can only fill if there are pending URBs and at least one unfilled buffer
343     if (pipe->num_urb_pending > 0 && pipe->multi_buffer_control.buffer_num_to_fill > 0) {
344         return true;
345     } else {
346         return false;
347     }
348 }
349 
350 /**
351  * @brief Fill an empty buffer with a pending URB
352  *
353  * This function will:
354  * - Remove an URB from the pending tailq
355  * - Fill that URB into the inactive buffer
356  *
357  * @note _buffer_can_fill() must return true before calling this function
358  *
359  * @param pipe Pipe object
360  */
361 static void _buffer_fill(pipe_t *pipe);
362 
363 /**
364  * @brief Check if there are more filled buffers than can be executed
365  *
366  * @param pipe Pipe object
367  * @return true There are more filled buffers to be executed
368  * @return false No more buffers to execute
369  */
370 static inline bool _buffer_can_exec(pipe_t *pipe)
371 {
372     //We can only execute if there is not already a buffer executing and if there are filled buffers awaiting execution
373     if (!pipe->multi_buffer_control.buffer_is_executing && pipe->multi_buffer_control.buffer_num_to_exec > 0) {
374         return true;
375     } else {
376         return false;
377     }
378 }
379 
380 /**
381  * @brief Execute the next filled buffer
382  *
383  * - Must have called _buffer_can_exec() before calling this function
384  * - Will start the execution of the buffer
385  *
386  * @param pipe Pipe object
387  */
388 static void _buffer_exec(pipe_t *pipe);
389 
390 /**
391  * @brief Check if a buffer has completed execution
392  *
393  * This should only be called after receiving a USBH_HAL_CHAN_EVENT_CPLT event to check if a buffer is actually
394  * done.
395  *
396  * @param pipe Pipe object
397  * @return true Buffer complete
398  * @return false Buffer not complete
399  */
400 static inline bool _buffer_check_done(pipe_t *pipe)
401 {
402     if (pipe->ep_char.type != USB_PRIV_XFER_TYPE_CTRL) {
403         return true;
404     }
405     //Only control transfers need to be continued
406     dma_buffer_block_t *buffer_inflight = pipe->buffers[pipe->multi_buffer_control.rd_idx];
407     return (buffer_inflight->flags.ctrl.cur_stg == 2);
408 }
409 
410 /**
411  * @brief Continue execution of a buffer
412  *
413  * This should only be called after checking if a buffer has completed execution using _buffer_check_done()
414  *
415  * @param pipe Pipe object
416  */
417 static void _buffer_exec_cont(pipe_t *pipe);
418 
419 /**
420  * @brief Marks the last executed buffer as complete
421  *
422  * This should be called on a pipe that has confirmed that a buffer is completed via _buffer_check_done()
423  *
424  * @param pipe Pipe object
425  * @param stop_idx Descriptor index when the buffer stopped execution
426  * @param stop_idx Descriptor index when the buffer stopped execution
427  * @param pipe_event Pipe event that caused the buffer to be complete. Use HCD_PIPE_EVENT_NONE for halt requests or disconnections
428  * @param canceled Whether the buffer was done due to a cancellation (i.e., a halt request). If so, pipe_event must be set to HCD_PIPE_EVENT_NONE
428  */
429 static inline void _buffer_done(pipe_t *pipe, int stop_idx, hcd_pipe_event_t pipe_event, bool canceled)
430 {
431     //Store the stop_idx and pipe_event for later parsing
432     dma_buffer_block_t *buffer_done = pipe->buffers[pipe->multi_buffer_control.rd_idx];
433     buffer_done->status_flags.executing = 0;
434     buffer_done->status_flags.was_canceled = canceled;
435     buffer_done->status_flags.stop_idx = stop_idx;
436     buffer_done->status_flags.pipe_event = pipe_event;
437     pipe->multi_buffer_control.rd_idx++;
438     pipe->multi_buffer_control.buffer_num_to_exec--;
439     pipe->multi_buffer_control.buffer_num_to_parse++;
440     pipe->multi_buffer_control.buffer_is_executing = 0;
441 }
442 
443 /**
444  * @brief Checks if a pipe has one or more completed buffers to parse
445  *
446  * @param pipe Pipe object
447  * @return true There are one or more buffers to parse
448  * @return false There are no more buffers to parse
449  */
450 static inline bool _buffer_can_parse(pipe_t *pipe)
451 {
452     if (pipe->multi_buffer_control.buffer_num_to_parse > 0) {
453         return true;
454     } else {
455         return false;
456     }
457 }
458 
459 /**
460  * @brief Parse a completed buffer
461  *
462  * This function will:
463  * - Parse the results of an URB from a completed buffer
464  * - Put the URB into the done tailq
465  *
466  * @note This function should only be called on the completion of a buffer
467  *
468  * @param pipe Pipe object
470  */
471 static void _buffer_parse(pipe_t *pipe);
472 
473 /**
474  * @brief Marks all buffers pending execution as completed, then parses those buffers
475  *
476  * @note This should only be called on pipes that do not have any currently executing buffers.
477  *
478  * @param pipe Pipe object
479  * @param canceled Whether this flush is due to cancellation
480  * @return true One or more buffers were flushed
481  * @return false There were no buffers that needed to be flushed
482  */
483 static bool _buffer_flush_all(pipe_t *pipe, bool canceled);
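
/*
 * For reference, the buffer-control functions above chain together roughly as
 * follows (a sketch of the flow used by the channel interrupt handler later in
 * this file; see _intr_hdlr_chan()):
 *
 *   if (_buffer_can_fill(pipe)) {
 *       _buffer_fill(pipe);          //Pending URB -> inactive buffer
 *   }
 *   if (_buffer_can_exec(pipe)) {
 *       _buffer_exec(pipe);          //Start the filled buffer on the channel
 *   }
 *   //...on a USBH_HAL_CHAN_EVENT_CPLT interrupt...
 *   if (_buffer_check_done(pipe)) {
 *       _buffer_done(pipe, stop_idx, HCD_PIPE_EVENT_URB_DONE, false);
 *       _buffer_parse(pipe);         //Completed buffer -> done tailq
 *   } else {
 *       _buffer_exec_cont(pipe);     //e.g., next stage of a control transfer
 *   }
 */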
484 
485 // ------------------------ Pipe ---------------------------
486 
487 /**
488  * @brief Decode a HAL channel error to the corresponding pipe event
489  *
490  * @param chan_error The HAL channel error
491  * @return hcd_pipe_event_t The corresponding pipe error event
492  */
493 static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t chan_error);
494 
495 /**
496  * @brief Halt a pipe
497  *
498  * - Attempts to halt a pipe. Pipe must be active in order to be halted
499  * - If the underlying channel has an ongoing transfer, a halt will be requested, then the function will block until the
500  *   channel indicates it is halted
501  * - If the channel has no ongoing transfer, the pipe will simply be marked as halted (thus preventing any further URBs
502  *   from being enqueued)
503  *
504  * @note This function can block
505  * @param pipe Pipe object
506  * @return esp_err_t
507  */
508 static esp_err_t _pipe_cmd_halt(pipe_t *pipe);
509 
510 /**
511  * @brief Flush a pipe
512  *
513  * - Flushing a pipe causes all of its pending URBs to become done, thus allowing them to be dequeued
514  * - The pipe must be halted in order to be flushed
515  * - The pipe callback will be run if one or more URBs become done
516  *
517  * @param pipe Pipe object
518  * @return esp_err_t
519  */
520 static esp_err_t _pipe_cmd_flush(pipe_t *pipe);
521 
522 /**
523  * @brief Clear a pipe from its halt
524  *
525  * - Pipe must be halted in order to be cleared
526  * - Clearing a pipe makes it active again
527  * - If there are any enqueued URBs, they will be executed
528  *
529  * @param pipe Pipe object
530  * @return esp_err_t
531  */
532 static esp_err_t _pipe_cmd_clear(pipe_t *pipe);
533 
534 // ------------------------ Port ---------------------------
535 
536 /**
537  * @brief Prepare persistent pipes for reset
538  *
539  * This function checks that all pipes are reset-persistent and, if so, frees the underlying HAL channels of those
540  * pipes. This should be called before a run-time reset
541  *
542  * @param port Port object
543  * @return true All pipes are persistent and their channels are freed
544  * @return false Not all pipes are persistent
545  */
546 static bool _port_persist_all_pipes(port_t *port);
547 
548 /**
549  * @brief Recovers all persistent pipes after a reset
550  *
551  * This function will recover all persistent pipes after a reset and reallocate their underlying HAL channels. This
552  * function should be called after a reset.
553  *
554  * @param port Port object
555  */
556 static void _port_recover_all_pipes(port_t *port);
557 
558 /**
559  * @brief Checks if all pipes are in the halted state
560  *
561  * @param port Port object
562  * @return true All pipes are halted
563  * @return false Not all pipes are halted
564  */
565 static bool _port_check_all_pipes_halted(port_t *port);
566 
567 /**
568  * @brief Debounce port after a connection or disconnection event
569  *
570  * This function should be called after a port connection or disconnect event. This function will execute a debounce
571  * delay, then check the actual connection/disconnection state.
572  *
573  * @note This function can block
574  * @param port Port object
575  * @return true A device is connected
576  * @return false No device connected
577  */
578 static bool _port_debounce(port_t *port);
579 
580 /**
581  * @brief Power ON the port
582  *
583  * @param port Port object
584  * @return esp_err_t
585  */
586 static esp_err_t _port_cmd_power_on(port_t *port);
587 
588 /**
589  * @brief Power OFF the port
590  *
591  * - If a device is currently connected, this function will cause a disconnect event
592  *
593  * @param port Port object
594  * @return esp_err_t
595  */
596 static esp_err_t _port_cmd_power_off(port_t *port);
597 
598 /**
599  * @brief Reset the port
600  *
601  * - This function issues a reset signal using the timings specified by the USB2.0 spec
602  *
603  * @note This function can block
604  * @param port Port object
605  * @return esp_err_t
606  */
607 static esp_err_t _port_cmd_reset(port_t *port);
608 
609 /**
610  * @brief Suspend the port
611  *
612  * - Port must be enabled in order to be suspended
613  * - All pipes must be halted for the port to be suspended
614  * - Suspending the port stops Keep Alive/SOF from being sent to the connected device
615  *
616  * @param port Port object
617  * @return esp_err_t
618  */
619 static esp_err_t _port_cmd_bus_suspend(port_t *port);
620 
621 /**
622  * @brief Resume the port
623  *
624  * - Port must be suspended in order to be resumed
625  *
626  * @note This function can block
627  * @param port Port object
628  * @return esp_err_t
629  */
630 static esp_err_t _port_cmd_bus_resume(port_t *port);
631 
632 /**
633  * @brief Disable the port
634  *
635  * - All pipes must be halted for the port to be disabled
636  * - The port must be enabled or suspended in order to be disabled
637  *
638  * @note This function can block
639  * @param port Port object
640  * @return esp_err_t
641  */
642 static esp_err_t _port_cmd_disable(port_t *port);
643 
644 // ----------------------- Events --------------------------
645 
646 /**
647  * @brief Wait for an internal event from a port
648  *
649  * @note For each port, there can only be one thread/task waiting for an internal port event
650  * @note This function is blocking (will exit and re-enter the critical section to do so)
651  *
652  * @param port Port object
653  */
654 static void _internal_port_event_wait(port_t *port);
655 
656 /**
657  * @brief Notify (from an ISR context) the thread/task waiting for the internal port event
658  *
659  * @param port Port object
660  * @return true A yield is required
661  * @return false No yield is required
662  */
663 static bool _internal_port_event_notify_from_isr(port_t *port);
664 
665 /**
666  * @brief Wait for an internal event from a particular pipe
667  *
668  * @note For each pipe, there can only be one thread/task waiting for an internal pipe event
669  * @note This function is blocking (will exit and re-enter the critical section to do so)
670  *
671  * @param pipe Pipe object
672  */
673 static void _internal_pipe_event_wait(pipe_t *pipe);
674 
675 /**
676  * @brief Notify (from an ISR context) the thread/task waiting for an internal pipe event
677  *
678  * @param pipe Pipe object
679  * @param from_isr Whether this is called from an ISR or not
680  * @return true A yield is required
681  * @return false No yield is required. Always false when from_isr is also false
682  */
683 static bool _internal_pipe_event_notify(pipe_t *pipe, bool from_isr);
684 
685 // ----------------------------------------------- Interrupt Handling --------------------------------------------------
686 
687 // ------------------- Internal Event ----------------------
688 
689 static void _internal_port_event_wait(port_t *port)
690 {
691     //There must NOT be another thread/task already waiting for an internal event
692     assert(port->task_waiting_port_notif == NULL);
693     port->task_waiting_port_notif = xTaskGetCurrentTaskHandle();
694     /* We need to loop as task notifications can come from anywhere. If this
695     was a port event notification, task_waiting_port_notif will have been cleared
696     by the notifier. */
697     while (port->task_waiting_port_notif != NULL) {
698         HCD_EXIT_CRITICAL();
699         //Wait to be notified from ISR
700         ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
701         HCD_ENTER_CRITICAL();
702     }
703 }
704 
705 static bool _internal_port_event_notify_from_isr(port_t *port)
706 {
707     //There must be a thread/task waiting for an internal event
708     assert(port->task_waiting_port_notif != NULL);
709     TaskHandle_t task_to_unblock = port->task_waiting_port_notif;
710     //Clear task_waiting_port_notif to indicate to the waiter that the unblock was indeed a port event notification
711     port->task_waiting_port_notif = NULL;
712     //Unblock the thread/task waiting for the notification
713     BaseType_t xTaskWoken = pdFALSE;
714     //Note: We don't exit the critical section to be atomic. vTaskNotifyGiveFromISR() doesn't block anyways
715     vTaskNotifyGiveFromISR(task_to_unblock, &xTaskWoken);
716     return (xTaskWoken == pdTRUE);
717 }
718 
719 static void _internal_pipe_event_wait(pipe_t *pipe)
720 {
721     //There must NOT be another thread/task already waiting for an internal event
722     assert(pipe->task_waiting_pipe_notif == NULL);
723     pipe->task_waiting_pipe_notif = xTaskGetCurrentTaskHandle();
724     /* We need to loop as task notifications can come from anywhere. If this
725     was a pipe event notification, task_waiting_pipe_notif will have been cleared
726     by the notifier. */
727     while (pipe->task_waiting_pipe_notif != NULL) {
728         //Wait to be unblocked by the notifier
729         HCD_EXIT_CRITICAL();
730         ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
731         HCD_ENTER_CRITICAL();
732     }
733 }
734 
735 static bool _internal_pipe_event_notify(pipe_t *pipe, bool from_isr)
736 {
737     //There must be a thread/task waiting for an internal event
738     assert(pipe->task_waiting_pipe_notif != NULL);
739     TaskHandle_t task_to_unblock = pipe->task_waiting_pipe_notif;
740     //Clear task_waiting_pipe_notif to indicate to the waiter that the unblock was indeed a pipe event notification
741     pipe->task_waiting_pipe_notif = NULL;
742     bool ret;
743     if (from_isr) {
744         BaseType_t xTaskWoken = pdFALSE;
745         //Note: We don't exit the critical section to be atomic. vTaskNotifyGiveFromISR() doesn't block anyways
746         //Unblock the thread/task waiting for the pipe notification
747         vTaskNotifyGiveFromISR(task_to_unblock, &xTaskWoken);
748         ret = (xTaskWoken == pdTRUE);
749     } else {
750         HCD_EXIT_CRITICAL();
751         xTaskNotifyGive(task_to_unblock);
752         HCD_ENTER_CRITICAL();
753         ret = false;
754     }
755     return ret;
756 }
757 
758 // ----------------- Interrupt Handlers --------------------
759 
760 /**
761  * @brief Handle a HAL port interrupt and obtain the corresponding port event
762  *
763  * @param[in] port Port object
764  * @param[in] hal_port_event The HAL port event
765  * @param[out] yield Set to true if a yield is required as a result of handling the interrupt
766  * @return hcd_port_event_t  Returns a port event, or HCD_PORT_EVENT_NONE if no port event occurred
767  */
768 static hcd_port_event_t _intr_hdlr_hprt(port_t *port, usbh_hal_port_event_t hal_port_event, bool *yield)
769 {
770     hcd_port_event_t port_event = HCD_PORT_EVENT_NONE;
771     switch (hal_port_event) {
772         case USBH_HAL_PORT_EVENT_CONN: {
773             //Don't update state immediately, we still need to debounce.
774             port_event = HCD_PORT_EVENT_CONNECTION;
775             break;
776         }
777         case USBH_HAL_PORT_EVENT_DISCONN: {
778             port->state = HCD_PORT_STATE_RECOVERY;
779             port_event = HCD_PORT_EVENT_DISCONNECTION;
780             port->flags.conn_dev_ena = 0;
781             break;
782         }
783         case USBH_HAL_PORT_EVENT_ENABLED: {
784             usbh_hal_port_enable(port->hal);  //Initialize remaining host port registers
785             port->speed = (usbh_hal_port_get_conn_speed(port->hal) == USB_PRIV_SPEED_FULL) ? USB_SPEED_FULL : USB_SPEED_LOW;
786             port->state = HCD_PORT_STATE_ENABLED;
787             port->flags.conn_dev_ena = 1;
788             //This was triggered by a command, so no event needs to be propagated.
789             break;
790         }
791         case USBH_HAL_PORT_EVENT_DISABLED: {
792             port->flags.conn_dev_ena = 0;
793             //Disabled could be due to a disable request or reset request, or due to a port error
794             if (port->state != HCD_PORT_STATE_RESETTING) {  //Ignore the disable event if it's due to a reset request
795                 if (port->flags.disable_requested) {
796                     //Disabled by request (i.e. by port command). Generate an internal event
797                     port->state = HCD_PORT_STATE_DISABLED;
798                     port->flags.disable_requested = 0;
799                     *yield |= _internal_port_event_notify_from_isr(port);
800                 } else {
801                     //Disabled due to a port error
802                     port->state = HCD_PORT_STATE_RECOVERY;
803                     port_event = HCD_PORT_EVENT_ERROR;
804                 }
805             }
806             break;
807         }
808         case USBH_HAL_PORT_EVENT_OVRCUR:
809         case USBH_HAL_PORT_EVENT_OVRCUR_CLR: {  //Could occur if a quick overcurrent then clear happens
810             if (port->state != HCD_PORT_STATE_NOT_POWERED) {
811                 //We need to power OFF the port to protect it
812                 usbh_hal_port_toggle_power(port->hal, false);
813                 port->state = HCD_PORT_STATE_RECOVERY;
814                 port_event = HCD_PORT_EVENT_OVERCURRENT;
815             }
816             port->flags.conn_dev_ena = 0;
817             break;
818         }
819         default: {
820             abort();
821             break;
822         }
823     }
824     return port_event;
825 }
826 
827 /**
828  * @brief Handles a HAL channel interrupt
829  *
830  * This function should be called on a HAL channel when it has an interrupt. Most HAL channel events will correspond to
831  * a pipe event, but not always. This function will store the pipe event and return a pipe object pointer if a pipe
832  * event occurred, or return NULL otherwise.
833  *
834  * @param[in] chan_obj Pointer to HAL channel object with interrupt
835  * @param[out] yield Set to true if a yield is required as a result of handling the interrupt
836  * @return hcd_pipe_event_t The pipe event
837  */
838 static hcd_pipe_event_t _intr_hdlr_chan(pipe_t *pipe, usbh_hal_chan_t *chan_obj, bool *yield)
839 {
840     usbh_hal_chan_event_t chan_event = usbh_hal_chan_decode_intr(chan_obj);
841     hcd_pipe_event_t event = HCD_PIPE_EVENT_NONE;
842 
843     switch (chan_event) {
844         case USBH_HAL_CHAN_EVENT_CPLT: {
845             if (!_buffer_check_done(pipe)) {
846                 _buffer_exec_cont(pipe);
847                 break;
848             }
849             pipe->last_event = HCD_PIPE_EVENT_URB_DONE;
850             event = pipe->last_event;
851             //Mark the buffer as done
852             int stop_idx = usbh_hal_chan_get_qtd_idx(chan_obj);
853             _buffer_done(pipe, stop_idx, pipe->last_event, false);
854             //First check if there is another buffer we can execute. But we only want to execute if there's still a valid device
855             if (_buffer_can_exec(pipe) && pipe->port->flags.conn_dev_ena) {
856                 //If the next buffer is filled and ready to execute, execute it
857                 _buffer_exec(pipe);
858             }
859             //Handle the previously done buffer
860             _buffer_parse(pipe);
861             //Check to see if we can fill another buffer. But we only want to fill if there is still a valid device
862             if (_buffer_can_fill(pipe) && pipe->port->flags.conn_dev_ena) {
863                 //Now that we've parsed a buffer, see if another URB can be filled in its place
864                 _buffer_fill(pipe);
865             }
866             break;
867         }
868         case USBH_HAL_CHAN_EVENT_ERROR: {
869             //Get and store the pipe error event
870             usbh_hal_chan_error_t chan_error = usbh_hal_chan_get_error(chan_obj);
871             pipe->last_event = pipe_decode_error_event(chan_error);
872             event = pipe->last_event;
873             pipe->state = HCD_PIPE_STATE_HALTED;
874             //Mark the buffer as done with an error
875             int stop_idx = usbh_hal_chan_get_qtd_idx(chan_obj);
876             _buffer_done(pipe, stop_idx, pipe->last_event, false);
877             //Parse the buffer
878             _buffer_parse(pipe);
879             break;
880         }
881         case USBH_HAL_CHAN_EVENT_HALT_REQ: {
882             assert(pipe->cs_flags.waiting_halt);
883             //We've halted a transfer, so we need to trigger the pipe callback
884             pipe->last_event = HCD_PIPE_EVENT_URB_DONE;
885             event = pipe->last_event;
886             //The halt request event is triggered when a packet is successfully completed. But just treat all halted transfers as errors
887             pipe->state = HCD_PIPE_STATE_HALTED;
888             int stop_idx = usbh_hal_chan_get_qtd_idx(chan_obj);
889             _buffer_done(pipe, stop_idx, HCD_PIPE_EVENT_NONE, true);
890             //Parse the buffer
891             _buffer_parse(pipe);
892             //Notify the task waiting for the pipe halt
893             *yield |= _internal_pipe_event_notify(pipe, true);
894             break;
895         }
896         case USBH_HAL_CHAN_EVENT_NONE: {
897             break;  //Nothing to do
898         }
899         default:
900             abort();
901             break;
902     }
903     return event;
904 }
905 
906 /**
907  * @brief Main interrupt handler
908  *
909  * - Handle all HPRT (Host Port) related interrupts first as they may change the
910  *   state of the driver (e.g., a disconnect event)
911  * - If any channels (pipes) have pending interrupts, handle them one by one
912  * - The HCD ISR has no blocking functions, so the user's ISR callback is run to
913  *   allow users to use whatever OS primitives they need.
914  *
915  * @param arg Interrupt handler argument
916  */
917 static void intr_hdlr_main(void *arg)
918 {
919     port_t *port = (port_t *) arg;
920     bool yield = false;
921 
922     HCD_ENTER_CRITICAL_ISR();
923     usbh_hal_port_event_t hal_port_evt = usbh_hal_decode_intr(port->hal);
924     if (hal_port_evt == USBH_HAL_PORT_EVENT_CHAN) {
925         //Channel event. Cycle through each pending channel
926         usbh_hal_chan_t *chan_obj = usbh_hal_get_chan_pending_intr(port->hal);
927         while (chan_obj != NULL) {
928             pipe_t *pipe = (pipe_t *)usbh_hal_chan_get_context(chan_obj);
929             hcd_pipe_event_t event = _intr_hdlr_chan(pipe, chan_obj, &yield);
930             //Run callback if a pipe event has occurred and the pipe also has a callback
931             if (event != HCD_PIPE_EVENT_NONE && pipe->callback != NULL) {
932                 HCD_EXIT_CRITICAL_ISR();
933                 yield |= pipe->callback((hcd_pipe_handle_t)pipe, event, pipe->callback_arg, true);
934                 HCD_ENTER_CRITICAL_ISR();
935             }
936             //Check for more channels with pending interrupts. Returns NULL if there are no more
937             chan_obj = usbh_hal_get_chan_pending_intr(port->hal);
938         }
939     } else if (hal_port_evt != USBH_HAL_PORT_EVENT_NONE) {  //Port event
940         hcd_port_event_t port_event = _intr_hdlr_hprt(port, hal_port_evt, &yield);
941         if (port_event != HCD_PORT_EVENT_NONE) {
942             port->last_event = port_event;
943             port->flags.event_pending = 1;
944             if (port->callback != NULL) {
945                 HCD_EXIT_CRITICAL_ISR();
946                 yield |= port->callback((hcd_port_handle_t)port, port_event, port->callback_arg, true);
947                 HCD_ENTER_CRITICAL_ISR();
948             }
949         }
950     }
951     HCD_EXIT_CRITICAL_ISR();
952 
953     if (yield) {
954         portYIELD_FROM_ISR();
955     }
956 }
957 
958 // --------------------------------------------- Host Controller Driver ------------------------------------------------
959 
960 static port_t *port_obj_alloc(void)
961 {
962     port_t *port = calloc(1, sizeof(port_t));
963     usbh_hal_context_t *hal = malloc(sizeof(usbh_hal_context_t));
964     void *frame_list = heap_caps_aligned_calloc(USBH_HAL_FRAME_LIST_MEM_ALIGN, FRAME_LIST_LEN, sizeof(uint32_t), MALLOC_CAP_DMA);
965     SemaphoreHandle_t port_mux = xSemaphoreCreateMutex();
966     if (port == NULL || hal == NULL || frame_list == NULL || port_mux == NULL) {
967         free(port);
968         free(hal);
969         free(frame_list);
970         if (port_mux != NULL) {
971             vSemaphoreDelete(port_mux);
972         }
973         return NULL;
974     }
975     port->hal = hal;
976     port->frame_list = frame_list;
977     port->port_mux = port_mux;
978     return port;
979 }
980 
981 static void port_obj_free(port_t *port)
982 {
983     if (port == NULL) {
984         return;
985     }
986     vSemaphoreDelete(port->port_mux);
987     free(port->frame_list);
988     free(port->hal);
989     free(port);
990 }
991 
992 // ----------------------- Public --------------------------
993 
994 esp_err_t hcd_install(const hcd_config_t *config)
995 {
996     HCD_ENTER_CRITICAL();
997     HCD_CHECK_FROM_CRIT(s_hcd_obj == NULL, ESP_ERR_INVALID_STATE);
998     HCD_EXIT_CRITICAL();
999 
1000     esp_err_t err_ret;
1001     //Allocate memory and resources for driver object and all port objects
1002     hcd_obj_t *p_hcd_obj_dmy = calloc(1, sizeof(hcd_obj_t));
1003     if (p_hcd_obj_dmy == NULL) {
1004         return ESP_ERR_NO_MEM;
1005     }
1006 
1007     //Allocate resources for each port (there's only one)
1008     p_hcd_obj_dmy->port_obj = port_obj_alloc();
1009     esp_err_t intr_alloc_ret = esp_intr_alloc(ETS_USB_INTR_SOURCE,
1010                                               config->intr_flags | ESP_INTR_FLAG_INTRDISABLED,  //The interrupt must be disabled until the port is initialized
1011                                               intr_hdlr_main,
1012                                               (void *)p_hcd_obj_dmy->port_obj,
1013                                               &p_hcd_obj_dmy->isr_hdl);
1014     //Bail out if either the port object or the interrupt allocation failed
1015     if (p_hcd_obj_dmy->port_obj == NULL || intr_alloc_ret != ESP_OK) {
1016         err_ret = (p_hcd_obj_dmy->port_obj == NULL) ? ESP_ERR_NO_MEM : intr_alloc_ret;
1017         goto err;
1018     }
1021 
1022     HCD_ENTER_CRITICAL();
1023     if (s_hcd_obj != NULL) {
1024         HCD_EXIT_CRITICAL();
1025         err_ret = ESP_ERR_INVALID_STATE;
1026         goto err;
1027     }
1028     s_hcd_obj = p_hcd_obj_dmy;
1029     HCD_EXIT_CRITICAL();
1030     return ESP_OK;
1031 
1032 err:
1033     if (intr_alloc_ret == ESP_OK) {
1034         esp_intr_free(p_hcd_obj_dmy->isr_hdl);
1035     }
1036     port_obj_free(p_hcd_obj_dmy->port_obj);
1037     free(p_hcd_obj_dmy);
1038     return err_ret;
1039 }
1040 
1041 esp_err_t hcd_uninstall(void)
1042 {
1043     HCD_ENTER_CRITICAL();
1044     //Check that all ports have been disabled (there's only one port)
1045     if (s_hcd_obj == NULL || s_hcd_obj->port_obj->initialized) {
1046         HCD_EXIT_CRITICAL();
1047         return ESP_ERR_INVALID_STATE;
1048     }
1049     hcd_obj_t *p_hcd_obj_dmy = s_hcd_obj;
1050     s_hcd_obj = NULL;
1051     HCD_EXIT_CRITICAL();
1052 
1053     //Free resources
1054     port_obj_free(p_hcd_obj_dmy->port_obj);
1055     esp_intr_free(p_hcd_obj_dmy->isr_hdl);
1056     free(p_hcd_obj_dmy);
1057     return ESP_OK;
1058 }
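
/*
 * Minimal install/uninstall sequence (a sketch; error handling is simplified
 * and the interrupt flag value is only an example):
 *
 *   hcd_config_t hcd_config = {
 *       .intr_flags = ESP_INTR_FLAG_LEVEL1,
 *   };
 *   ESP_ERROR_CHECK(hcd_install(&hcd_config));
 *   //...initialize the port, run transfers, deinitialize the port...
 *   ESP_ERROR_CHECK(hcd_uninstall());    //Only valid once the port is deinitialized
 */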
1059 
1060 // ------------------------------------------------------ Port ---------------------------------------------------------
1061 
1062 // ----------------------- Helpers -------------------------
1063 
1064 static bool _port_persist_all_pipes(port_t *port)
1065 {
1066     if (port->num_pipes_queued > 0) {
1067         //All pipes must be idle before we run-time reset
1068         return false;
1069     }
1070     bool all_persist = true;
1071     pipe_t *pipe;
1072     //Check that each pipe is persistent
1073     TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
1074         if (!pipe->cs_flags.persist) {
1075             all_persist = false;
1076             break;
1077         }
1078     }
1079     if (!all_persist) {
1080         //At least one pipe is not persistent. All pipes must be freed or made persistent before we can reset
1081         return false;
1082     }
1083     TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
1084         pipe->cs_flags.reset_lock = 1;
1085         usbh_hal_chan_free(port->hal, pipe->chan_obj);
1086     }
1087     return true;
1088 }
1089 
1090 static void _port_recover_all_pipes(port_t *port)
1091 {
1092     pipe_t *pipe;
1093     TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
1094         pipe->cs_flags.persist = 0;
1095         pipe->cs_flags.reset_lock = 0;
1096         usbh_hal_chan_alloc(port->hal, pipe->chan_obj, (void *)pipe);
1097         usbh_hal_chan_set_ep_char(port->hal, pipe->chan_obj, &pipe->ep_char);
1098     }
1099 }
1100 
1101 static bool _port_check_all_pipes_halted(port_t *port)
1102 {
1103     bool all_halted = true;
1104     pipe_t *pipe;
1105     TAILQ_FOREACH(pipe, &port->pipes_active_tailq, tailq_entry) {
1106         if (pipe->state != HCD_PIPE_STATE_HALTED) {
1107             all_halted = false;
1108             break;
1109         }
1110     }
1111     TAILQ_FOREACH(pipe, &port->pipes_idle_tailq, tailq_entry) {
1112         if (pipe->state != HCD_PIPE_STATE_HALTED) {
1113             all_halted = false;
1114             break;
1115         }
1116     }
1117     return all_halted;
1118 }
1119 
1120 static bool _port_debounce(port_t *port)
1121 {
1122     if (port->state == HCD_PORT_STATE_NOT_POWERED) {
1123         //Disconnect event due to power off, no need to debounce or update port state.
1124         return false;
1125     }
1126     HCD_EXIT_CRITICAL();
1127     vTaskDelay(pdMS_TO_TICKS(DEBOUNCE_DELAY_MS));
1128     HCD_ENTER_CRITICAL();
1129     //Check the post-debounce state of the bus (i.e., whether it's actually connected/disconnected)
1130     bool is_connected = usbh_hal_port_check_if_connected(port->hal);
1131     if (is_connected) {
1132         port->state = HCD_PORT_STATE_DISABLED;
1133     } else {
1134         port->state = HCD_PORT_STATE_DISCONNECTED;
1135     }
1136     //Disable debounce lock
1137     usbh_hal_disable_debounce_lock(port->hal);
1138     return is_connected;
1139 }
1140 
1141 // ---------------------- Commands -------------------------
1142 
1143 static esp_err_t _port_cmd_power_on(port_t *port)
1144 {
1145     esp_err_t ret;
1146     //Port can only be powered on if it's currently unpowered
1147     if (port->state == HCD_PORT_STATE_NOT_POWERED) {
1148         port->state = HCD_PORT_STATE_DISCONNECTED;
1149         usbh_hal_port_init(port->hal);
1150         usbh_hal_port_toggle_power(port->hal, true);
1151         ret = ESP_OK;
1152     } else {
1153         ret = ESP_ERR_INVALID_STATE;
1154     }
1155     return ret;
1156 }
1157 
1158 static esp_err_t _port_cmd_power_off(port_t *port)
1159 {
1160     esp_err_t ret;
1161     //Port can only be unpowered if already powered
1162     if (port->state != HCD_PORT_STATE_NOT_POWERED) {
1163         port->state = HCD_PORT_STATE_NOT_POWERED;
1164         usbh_hal_port_deinit(port->hal);
1165         usbh_hal_port_toggle_power(port->hal, false);
1166         //If a device is currently connected, this should trigger a disconnect event
1167         ret = ESP_OK;
1168     } else {
1169         ret = ESP_ERR_INVALID_STATE;
1170     }
1171     return ret;
1172 }
1173 
1174 static esp_err_t _port_cmd_reset(port_t *port)
1175 {
1176     esp_err_t ret;
1177     //Port can only be reset when it is in the enabled or disabled state (the latter in case of a new connection)
1178     if (port->state != HCD_PORT_STATE_ENABLED && port->state != HCD_PORT_STATE_DISABLED) {
1179         ret = ESP_ERR_INVALID_STATE;
1180         goto exit;
1181     }
1182     bool is_runtime_reset = (port->state == HCD_PORT_STATE_ENABLED) ? true : false;
1183     if (is_runtime_reset && !_port_persist_all_pipes(port)) {
1184         //If this is a run time reset, check all pipes that are still allocated can persist the reset
1185         ret = ESP_ERR_INVALID_STATE;
1186         goto exit;
1187     }
1188     //All pipes (if any) are guaranteed to be persistent at this point. Proceed to resetting the bus
1189     port->state = HCD_PORT_STATE_RESETTING;
1190     //Put and hold the bus in the reset state. If the port was previously enabled, a disabled event will occur after this
1191     usbh_hal_port_toggle_reset(port->hal, true);
1192     HCD_EXIT_CRITICAL();
1193     vTaskDelay(pdMS_TO_TICKS(RESET_HOLD_MS));
1194     HCD_ENTER_CRITICAL();
1195     if (port->state != HCD_PORT_STATE_RESETTING) {
1196         //The port state has unexpectedly changed
1197         ret = ESP_ERR_INVALID_RESPONSE;
1198         goto bailout;
1199     }
1200     //Return the bus to the idle state and hold it for the required reset recovery time. Port enabled event should occur
1201     usbh_hal_port_toggle_reset(port->hal, false);
1202     HCD_EXIT_CRITICAL();
1203     vTaskDelay(pdMS_TO_TICKS(RESET_RECOVERY_MS));
1204     HCD_ENTER_CRITICAL();
1205     if (port->state != HCD_PORT_STATE_ENABLED || !port->flags.conn_dev_ena) {
1206         //The port state has unexpectedly changed
1207         ret = ESP_ERR_INVALID_RESPONSE;
1208         goto bailout;
1209     }
1210     //Set FIFO sizes based on the selected biasing
1211     usbh_hal_set_fifo_size(port->hal, port->fifo_config);
1212     //We start periodic scheduling only after a RESET command since SOFs only start after a reset
1213     usbh_hal_port_set_frame_list(port->hal, port->frame_list, FRAME_LIST_LEN);
1214     usbh_hal_port_periodic_enable(port->hal);
1215     ret = ESP_OK;
1216 bailout:
1217     if (is_runtime_reset) {
1218         _port_recover_all_pipes(port);
1219     }
1220 exit:
1221     return ret;
1222 }
1223 
1224 static esp_err_t _port_cmd_bus_suspend(port_t *port)
1225 {
1226     esp_err_t ret;
1227     //Port must have been previously enabled, and all pipes must already be halted
1228     if (port->state != HCD_PORT_STATE_ENABLED || !_port_check_all_pipes_halted(port)) {
1229         ret = ESP_ERR_INVALID_STATE;
1230         goto exit;
1231     }
1232     //All pipes are guaranteed halted at this point. Proceed to suspend the port
1233     usbh_hal_port_suspend(port->hal);
1234     port->state = HCD_PORT_STATE_SUSPENDED;
1235     ret = ESP_OK;
1236 exit:
1237     return ret;
1238 }
1239 
1240 static esp_err_t _port_cmd_bus_resume(port_t *port)
1241 {
1242     esp_err_t ret;
1243     //Port can only be resumed if it was previously suspended
1244     if (port->state != HCD_PORT_STATE_SUSPENDED) {
1245         ret = ESP_ERR_INVALID_STATE;
1246         goto exit;
1247     }
1248     //Put and hold the bus in the K state.
1249     usbh_hal_port_toggle_resume(port->hal, true);
1250     port->state = HCD_PORT_STATE_RESUMING;
1251     HCD_EXIT_CRITICAL();
1252     vTaskDelay(pdMS_TO_TICKS(RESUME_HOLD_MS));
1253     HCD_ENTER_CRITICAL();
1254     //Return the bus to the J state and hold it (as part of the LS EOP)
1255     usbh_hal_port_toggle_resume(port->hal, false);
1256     if (port->state != HCD_PORT_STATE_RESUMING || !port->flags.conn_dev_ena) {
1257         //Port state unexpectedly changed
1258         ret = ESP_ERR_INVALID_RESPONSE;
1259         goto exit;
1260     }
1261     HCD_EXIT_CRITICAL();
1262     vTaskDelay(pdMS_TO_TICKS(RESUME_RECOVERY_MS));
1263     HCD_ENTER_CRITICAL();
1264     if (port->state != HCD_PORT_STATE_RESUMING || !port->flags.conn_dev_ena) {
1265         //Port state unexpectedly changed
1266         ret = ESP_ERR_INVALID_RESPONSE;
1267         goto exit;
1268     }
1269     port->state = HCD_PORT_STATE_ENABLED;
1270     ret = ESP_OK;
1271 exit:
1272     return ret;
1273 }
1274 
1275 static esp_err_t _port_cmd_disable(port_t *port)
1276 {
1277     esp_err_t ret;
1278     if (port->state != HCD_PORT_STATE_ENABLED && port->state != HCD_PORT_STATE_SUSPENDED) {
1279         ret = ESP_ERR_INVALID_STATE;
1280         goto exit;
1281     }
1282     //All pipes must be halted before disabling the port
1283     if (!_port_check_all_pipes_halted(port)){
1284         ret = ESP_ERR_INVALID_STATE;
1285         goto exit;
1286     }
1287     //All pipes are guaranteed to be halted or freed at this point. Proceed to disable the port
1288     port->flags.disable_requested = 1;
1289     usbh_hal_port_disable(port->hal);
1290     _internal_port_event_wait(port);
1291     if (port->state != HCD_PORT_STATE_DISABLED) {
1292         //Port state unexpectedly changed
1293         ret = ESP_ERR_INVALID_RESPONSE;
1294         goto exit;
1295     }
1296     ret = ESP_OK;
1297 exit:
1298     return ret;
1299 }
1300 
1301 // ----------------------- Public --------------------------
1302 
1303 esp_err_t hcd_port_init(int port_number, const hcd_port_config_t *port_config, hcd_port_handle_t *port_hdl)
1304 {
1305     HCD_CHECK(port_number > 0 && port_config != NULL && port_hdl != NULL, ESP_ERR_INVALID_ARG);
1306     HCD_CHECK(port_number <= NUM_PORTS, ESP_ERR_NOT_FOUND);
1307 
1308     //Get a pointer to the correct FIFO bias constant values
1309     const usbh_hal_fifo_config_t *fifo_config;
1310     const fifo_mps_limits_t *mps_limits;
1311     switch (port_config->fifo_bias) {
1312         case HCD_PORT_FIFO_BIAS_BALANCED:
1313             fifo_config = &fifo_config_default;
1314             mps_limits = &mps_limits_default;
1315             break;
1316         case HCD_PORT_FIFO_BIAS_RX:
1317             fifo_config = &fifo_config_bias_rx;
1318             mps_limits = &mps_limits_bias_rx;
1319             break;
1320         case HCD_PORT_FIFO_BIAS_PTX:
1321             fifo_config = &fifo_config_bias_ptx;
1322             mps_limits = &mps_limits_bias_ptx;
1323             break;
1324         default:
1325             fifo_config = NULL;
1326             mps_limits = NULL;
1327             abort();
1328             break;
1329     }
1330 
1331     HCD_ENTER_CRITICAL();
1332     HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && !s_hcd_obj->port_obj->initialized, ESP_ERR_INVALID_STATE);
1333     //Port object memory and resources (such as the mutex) are already allocated. Just initialize the necessary fields
1334     port_t *port_obj = s_hcd_obj->port_obj;
1335     TAILQ_INIT(&port_obj->pipes_idle_tailq);
1336     TAILQ_INIT(&port_obj->pipes_active_tailq);
1337     port_obj->state = HCD_PORT_STATE_NOT_POWERED;
1338     port_obj->last_event = HCD_PORT_EVENT_NONE;
1339     port_obj->fifo_config = fifo_config;
1340     port_obj->fifo_mps_limits = mps_limits;
1341     port_obj->callback = port_config->callback;
1342     port_obj->callback_arg = port_config->callback_arg;
1343     port_obj->context = port_config->context;
1344     usbh_hal_init(port_obj->hal);
1345     port_obj->initialized = true;
1346     //Clear the frame list. We set the frame list register and enable periodic scheduling after a successful reset
1347     memset(port_obj->frame_list, 0, FRAME_LIST_LEN * sizeof(uint32_t));
1348     esp_intr_enable(s_hcd_obj->isr_hdl);
1349     *port_hdl = (hcd_port_handle_t)port_obj;
1350     HCD_EXIT_CRITICAL();
1351 
1352     vTaskDelay(pdMS_TO_TICKS(INIT_DELAY_MS));    //Need a short delay before host mode takes effect
1353     return ESP_OK;
1354 }
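/*
 * Illustrative sketch (not part of the driver): a host stack would typically
 * bring up the single root port roughly as follows. The callback function and
 * error handling style are assumptions; only hcd_port_config_t fields that
 * this file actually reads are shown.
 *
 *     hcd_port_config_t port_config = {
 *         .fifo_bias = HCD_PORT_FIFO_BIAS_BALANCED,
 *         .callback = example_port_callback,   //Hypothetical caller-provided callback
 *         .callback_arg = NULL,
 *         .context = NULL,
 *     };
 *     hcd_port_handle_t port_hdl;
 *     ESP_ERROR_CHECK(hcd_port_init(1, &port_config, &port_hdl));  //Port numbering starts at 1
 */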
1355 
1356 esp_err_t hcd_port_deinit(hcd_port_handle_t port_hdl)
1357 {
1358     port_t *port = (port_t *)port_hdl;
1359 
1360     HCD_ENTER_CRITICAL();
1361     HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && port->initialized
1362                         && port->num_pipes_idle == 0 && port->num_pipes_queued == 0
1363                         && (port->state == HCD_PORT_STATE_NOT_POWERED || port->state == HCD_PORT_STATE_RECOVERY)
1364                         && port->task_waiting_port_notif == NULL,
1365                         ESP_ERR_INVALID_STATE);
1366     port->initialized = false;
1367     esp_intr_disable(s_hcd_obj->isr_hdl);
1368     usbh_hal_deinit(port->hal);
1369     HCD_EXIT_CRITICAL();
1370 
1371     return ESP_OK;
1372 }
1373 
1374 esp_err_t hcd_port_command(hcd_port_handle_t port_hdl, hcd_port_cmd_t command)
1375 {
1376     esp_err_t ret = ESP_ERR_INVALID_STATE;
1377     port_t *port = (port_t *)port_hdl;
1378     xSemaphoreTake(port->port_mux, portMAX_DELAY);
1379     HCD_ENTER_CRITICAL();
1380     if (port->initialized && !port->flags.event_pending) { //Port events need to be handled first before issuing a command
1381         port->flags.cmd_processing = 1;
1382         switch (command) {
1383             case HCD_PORT_CMD_POWER_ON: {
1384                 ret = _port_cmd_power_on(port);
1385                 break;
1386             }
1387             case HCD_PORT_CMD_POWER_OFF: {
1388                 ret = _port_cmd_power_off(port);
1389                 break;
1390             }
1391             case HCD_PORT_CMD_RESET: {
1392                 ret = _port_cmd_reset(port);
1393                 break;
1394             }
1395             case HCD_PORT_CMD_SUSPEND: {
1396                 ret = _port_cmd_bus_suspend(port);
1397                 break;
1398             }
1399             case HCD_PORT_CMD_RESUME: {
1400                 ret = _port_cmd_bus_resume(port);
1401                 break;
1402             }
1403             case HCD_PORT_CMD_DISABLE: {
1404                 ret = _port_cmd_disable(port);
1405                 break;
1406             }
1407         }
1408         port->flags.cmd_processing = 0;
1409     }
1410     HCD_EXIT_CRITICAL();
1411     xSemaphoreGive(port->port_mux);
1412     return ret;
1413 }
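/*
 * Illustrative sketch (assumption): a typical command sequence after port
 * initialization. The caller powers the port, waits for a connection event
 * (reported via the port callback and hcd_port_handle_event()), then issues a
 * reset so the port can reach the enabled state.
 *
 *     ESP_ERROR_CHECK(hcd_port_command(port_hdl, HCD_PORT_CMD_POWER_ON));
 *     //...wait for and handle HCD_PORT_EVENT_CONNECTION...
 *     ESP_ERROR_CHECK(hcd_port_command(port_hdl, HCD_PORT_CMD_RESET));
 */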
1414 
1415 hcd_port_state_t hcd_port_get_state(hcd_port_handle_t port_hdl)
1416 {
1417     port_t *port = (port_t *)port_hdl;
1418     hcd_port_state_t ret;
1419     HCD_ENTER_CRITICAL();
1420     ret = port->state;
1421     HCD_EXIT_CRITICAL();
1422     return ret;
1423 }
1424 
1425 esp_err_t hcd_port_get_speed(hcd_port_handle_t port_hdl, usb_speed_t *speed)
1426 {
1427     port_t *port = (port_t *)port_hdl;
1428     HCD_CHECK(speed != NULL, ESP_ERR_INVALID_ARG);
1429     HCD_ENTER_CRITICAL();
1430     //Device speed is only valid if there is a device connected to the port and that device has been reset
1431     HCD_CHECK_FROM_CRIT(port->flags.conn_dev_ena, ESP_ERR_INVALID_STATE);
1432     usb_priv_speed_t hal_speed = usbh_hal_port_get_conn_speed(port->hal);
1433     if (hal_speed == USB_PRIV_SPEED_FULL) {
1434         *speed = USB_SPEED_FULL;
1435     } else {
1436         *speed = USB_SPEED_LOW;
1437     }
1438     HCD_EXIT_CRITICAL();
1439     return ESP_OK;
1440 }
1441 
1442 hcd_port_event_t hcd_port_handle_event(hcd_port_handle_t port_hdl)
1443 {
1444     port_t *port = (port_t *)port_hdl;
1445     hcd_port_event_t ret = HCD_PORT_EVENT_NONE;
1446     xSemaphoreTake(port->port_mux, portMAX_DELAY);
1447     HCD_ENTER_CRITICAL();
1448     if (port->initialized && port->flags.event_pending) {
1449         port->flags.event_pending = 0;
1450         port->flags.event_processing = 1;
1451         ret = port->last_event;
1452         switch (ret) {
1453             case HCD_PORT_EVENT_CONNECTION: {
1454                 if (_port_debounce(port)) {
1455                     ret = HCD_PORT_EVENT_CONNECTION;
1456                 }
1457                 break;
1458             }
1459             case HCD_PORT_EVENT_DISCONNECTION:
1460             case HCD_PORT_EVENT_ERROR:
1461             case HCD_PORT_EVENT_OVERCURRENT: {
1462                 break;
1463             }
1464             default: {
1465                 break;
1466             }
1467         }
1468         port->flags.event_processing = 0;
1469     } else {
1470         ret = HCD_PORT_EVENT_NONE;
1471     }
1472     HCD_EXIT_CRITICAL();
1473     xSemaphoreGive(port->port_mux);
1474     return ret;
1475 }
1476 
1477 esp_err_t hcd_port_recover(hcd_port_handle_t port_hdl)
1478 {
1479     port_t *port = (port_t *)port_hdl;
1480     HCD_ENTER_CRITICAL();
1481     HCD_CHECK_FROM_CRIT(s_hcd_obj != NULL && port->initialized && port->state == HCD_PORT_STATE_RECOVERY
1482                         && port->num_pipes_idle == 0 && port->num_pipes_queued == 0
1483                         && port->flags.val == 0 && port->task_waiting_port_notif == NULL,
1484                         ESP_ERR_INVALID_STATE);
1485     //We are about to do a soft reset on the peripheral. Disable the peripheral throughout
1486     esp_intr_disable(s_hcd_obj->isr_hdl);
1487     usbh_hal_core_soft_reset(port->hal);
1488     port->state = HCD_PORT_STATE_NOT_POWERED;
1489     port->last_event = HCD_PORT_EVENT_NONE;
1490     port->flags.val = 0;
1491     //Soft reset wipes all registers so we need to reinitialize the HAL
1492     usbh_hal_init(port->hal);
1493     //Clear the frame list. We set the frame list register and enable periodic scheduling after a successful reset
1494     memset(port->frame_list, 0, FRAME_LIST_LEN * sizeof(uint32_t));
1495     esp_intr_enable(s_hcd_obj->isr_hdl);
1496     HCD_EXIT_CRITICAL();
1497     return ESP_OK;
1498 }
1499 
1500 void *hcd_port_get_context(hcd_port_handle_t port_hdl)
1501 {
1502     port_t *port = (port_t *)port_hdl;
1503     void *ret;
1504     HCD_ENTER_CRITICAL();
1505     ret = port->context;
1506     HCD_EXIT_CRITICAL();
1507     return ret;
1508 }
1509 
1510 esp_err_t hcd_port_set_fifo_bias(hcd_port_handle_t port_hdl, hcd_port_fifo_bias_t bias)
1511 {
1512     esp_err_t ret;
1513     //Get a pointer to the correct FIFO bias constant values
1514     const usbh_hal_fifo_config_t *fifo_config;
1515     const fifo_mps_limits_t *mps_limits;
1516     switch (bias) {
1517         case HCD_PORT_FIFO_BIAS_BALANCED:
1518             fifo_config = &fifo_config_default;
1519             mps_limits = &mps_limits_default;
1520             break;
1521         case HCD_PORT_FIFO_BIAS_RX:
1522             fifo_config = &fifo_config_bias_rx;
1523             mps_limits = &mps_limits_bias_rx;
1524             break;
1525         case HCD_PORT_FIFO_BIAS_PTX:
1526             fifo_config = &fifo_config_bias_ptx;
1527             mps_limits = &mps_limits_bias_ptx;
1528             break;
1529         default:
1530             fifo_config = NULL;
1531             mps_limits = NULL;
1532             abort();
1533             break;
1534     }
1535     //Configure the new FIFO sizes and store the pointers
1536     port_t *port = (port_t *)port_hdl;
1537     xSemaphoreTake(port->port_mux, portMAX_DELAY);
1538     HCD_ENTER_CRITICAL();
1539     //Check that port is in the correct state to update FIFO sizes
1540     if (port->initialized && !port->flags.event_pending && port->num_pipes_idle == 0 && port->num_pipes_queued == 0) {
1541         usbh_hal_set_fifo_size(port->hal, fifo_config);
1542         port->fifo_config = fifo_config;
1543         port->fifo_mps_limits = mps_limits;
1544         ret = ESP_OK;
1545     } else {
1546         ret = ESP_ERR_INVALID_STATE;
1547     }
1548     HCD_EXIT_CRITICAL();
1549     xSemaphoreGive(port->port_mux);
1550     return ret;
1551 }
1552 
1553 // --------------------------------------------------- HCD Pipes -------------------------------------------------------
1554 
1555 // ----------------------- Private -------------------------
1556 
1557 static inline hcd_pipe_event_t pipe_decode_error_event(usbh_hal_chan_error_t chan_error)
1558 {
1559     hcd_pipe_event_t event = HCD_PIPE_EVENT_NONE;
1560     switch (chan_error) {
1561         case USBH_HAL_CHAN_ERROR_XCS_XACT:
1562             event = HCD_PIPE_EVENT_ERROR_XFER;
1563             break;
1564         case USBH_HAL_CHAN_ERROR_BNA:
1565             event = HCD_PIPE_EVENT_ERROR_URB_NOT_AVAIL;
1566             break;
1567         case USBH_HAL_CHAN_ERROR_PKT_BBL:
1568             event = HCD_PIPE_EVENT_ERROR_OVERFLOW;
1569             break;
1570         case USBH_HAL_CHAN_ERROR_STALL:
1571             event = HCD_PIPE_EVENT_ERROR_STALL;
1572             break;
1573     }
1574     return event;
1575 }
1576 
1577 static dma_buffer_block_t *buffer_block_alloc(usb_transfer_type_t type)
1578 {
1579     int desc_list_len;
1580     switch (type) {
1581     case USB_TRANSFER_TYPE_CTRL:
1582         desc_list_len = XFER_LIST_LEN_CTRL;
1583         break;
1584     case USB_TRANSFER_TYPE_ISOCHRONOUS:
1585         desc_list_len = XFER_LIST_LEN_ISOC;
1586         break;
1587     case USB_TRANSFER_TYPE_BULK:
1588         desc_list_len = XFER_LIST_LEN_BULK;
1589         break;
1590     default:    //USB_TRANSFER_TYPE_INTR:
1591         desc_list_len = XFER_LIST_LEN_INTR;
1592         break;
1593     }
1594     dma_buffer_block_t *buffer = calloc(1, sizeof(dma_buffer_block_t));
1595     void *xfer_desc_list = heap_caps_aligned_calloc(USBH_HAL_DMA_MEM_ALIGN, desc_list_len, sizeof(usbh_ll_dma_qtd_t), MALLOC_CAP_DMA);
1596     if (buffer == NULL || xfer_desc_list == NULL) {
1597         free(buffer);
1598         heap_caps_free(xfer_desc_list);
1599         return NULL;
1600     }
1601     buffer->xfer_desc_list = xfer_desc_list;
1602     return buffer;
1603 }
1604 
1605 static void buffer_block_free(dma_buffer_block_t *buffer)
1606 {
1607     if (buffer == NULL) {
1608         return;
1609     }
1610     heap_caps_free(buffer->xfer_desc_list);
1611     free(buffer);
1612 }
1613 
1614 static bool pipe_alloc_check_args(const hcd_pipe_config_t *pipe_config, usb_speed_t port_speed, const fifo_mps_limits_t *mps_limits, usb_transfer_type_t type, bool is_default_pipe)
1615 {
1616     //Check if pipe can be supported
1617     if (port_speed == USB_SPEED_LOW && pipe_config->dev_speed == USB_SPEED_FULL) {
1618         //A low speed port does not support full speed pipes
1619         return false;
1620     }
1621     if (pipe_config->dev_speed == USB_SPEED_LOW && (type == USB_TRANSFER_TYPE_BULK || type == USB_TRANSFER_TYPE_ISOCHRONOUS)) {
1622         //Low speed does not support Bulk or Isochronous pipes
1623         return false;
1624     }
1625     //Check interval of pipe
1626     if (type == USB_TRANSFER_TYPE_INTR &&
1627         (pipe_config->ep_desc->bInterval > 0 && pipe_config->ep_desc->bInterval > 32)) {
1628         //Interval not supported for interrupt pipe
1629         return false;
1630     }
1631     if (type == USB_TRANSFER_TYPE_ISOCHRONOUS &&
1632         (pipe_config->ep_desc->bInterval > 0 && pipe_config->ep_desc->bInterval > 6)) {
1633         //Interval not supported for isochronous pipe (where 0 < 2^(bInterval - 1) <= 32)
1634         return false;
1635     }
1636     if (is_default_pipe) {
1637         return true;
1638     }
1639 
1640     int limit;
1641     if (USB_EP_DESC_GET_EP_DIR(pipe_config->ep_desc)) { //IN
1642         limit = mps_limits->in_mps;
1643     } else {    //OUT
1644         if (type == USB_TRANSFER_TYPE_CTRL || type == USB_TRANSFER_TYPE_BULK) {
1645             limit = mps_limits->non_periodic_out_mps;
1646         } else {
1647             limit = mps_limits->periodic_out_mps;
1648         }
1649     }
1650     return (pipe_config->ep_desc->wMaxPacketSize <= limit);
1651 }
1652 
1653 static void pipe_set_ep_char(const hcd_pipe_config_t *pipe_config, usb_transfer_type_t type, bool is_default_pipe, int pipe_idx, usb_speed_t port_speed, usbh_hal_ep_char_t *ep_char)
1654 {
1655     //Initialize EP characteristics
1656     usb_priv_xfer_type_t hal_xfer_type;
1657     switch (type) {
1658         case USB_TRANSFER_TYPE_CTRL:
1659             hal_xfer_type = USB_PRIV_XFER_TYPE_CTRL;
1660             break;
1661         case USB_TRANSFER_TYPE_ISOCHRONOUS:
1662             hal_xfer_type = USB_PRIV_XFER_TYPE_ISOCHRONOUS;
1663             break;
1664         case USB_TRANSFER_TYPE_BULK:
1665             hal_xfer_type = USB_PRIV_XFER_TYPE_BULK;
1666             break;
1667         default:    //USB_TRANSFER_TYPE_INTR
1668             hal_xfer_type = USB_PRIV_XFER_TYPE_INTR;
1669             break;
1670     }
1671     ep_char->type = hal_xfer_type;
1672     if (is_default_pipe) {
1673         ep_char->bEndpointAddress = 0;
1674         //Set the default pipe's MPS to the worst case MPS for the device's speed
1675         ep_char->mps = (pipe_config->dev_speed == USB_SPEED_FULL) ? CTRL_EP_MAX_MPS_FS : CTRL_EP_MAX_MPS_LS;
1676     } else {
1677         ep_char->bEndpointAddress = pipe_config->ep_desc->bEndpointAddress;
1678         ep_char->mps = pipe_config->ep_desc->wMaxPacketSize;
1679     }
1680     ep_char->dev_addr = pipe_config->dev_addr;
1681     ep_char->ls_via_fs_hub = (port_speed == USB_SPEED_FULL && pipe_config->dev_speed == USB_SPEED_LOW);
1682     //Calculate the pipe's interval in terms of USB frames
1683     if (type == USB_TRANSFER_TYPE_INTR || type == USB_TRANSFER_TYPE_ISOCHRONOUS) {
1684         int interval_frames;
1685         if (type == USB_TRANSFER_TYPE_INTR) {
1686             interval_frames = pipe_config->ep_desc->bInterval;
1687         } else {
1688             interval_frames = (1 << (pipe_config->ep_desc->bInterval - 1));
1689         }
1690         //Round down interval to nearest power of 2
1691         if (interval_frames >= 32) {
1692             interval_frames = 32;
1693         } else if (interval_frames >= 16) {
1694             interval_frames = 16;
1695         } else if (interval_frames >= 8) {
1696             interval_frames = 8;
1697         } else if (interval_frames >= 4) {
1698             interval_frames = 4;
1699         } else if (interval_frames >= 2) {
1700             interval_frames = 2;
1701         } else if (interval_frames >= 1) {
1702             interval_frames = 1;
1703         }
1704         ep_char->periodic.interval = interval_frames;
1705         //We are the Nth pipe to be allocated. Use N as a phase offset
1706         ep_char->periodic.phase_offset_frames = pipe_idx & (XFER_LIST_LEN_ISOC - 1);
1707     } else {
1708         ep_char->periodic.interval = 0;
1709         ep_char->periodic.phase_offset_frames = 0;
1710     }
1711 }
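/*
 * Worked example of the interval calculation above (values are illustrative):
 * - INTR endpoint with bInterval = 10: interval_frames starts at 10 and is
 *   rounded down to the nearest power of 2, giving an interval of 8 frames.
 * - ISOC endpoint with bInterval = 4: interval_frames = 2^(4 - 1) = 8 frames,
 *   already a power of 2, so it is used as-is.
 * In both cases the Nth allocated pipe gets a phase offset of N (modulo the
 * frame list length) to spread periodic traffic across frames.
 */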
1712 
1713 // ---------------------- Commands -------------------------
1714 
1715 static esp_err_t _pipe_cmd_halt(pipe_t *pipe)
1716 {
1717     esp_err_t ret;
1718 
1719     //If pipe is already halted, just return.
1720     if (pipe->state == HCD_PIPE_STATE_HALTED) {
1721         ret = ESP_OK;
1722         goto exit;
1723     }
1724     //If the pipe's port is invalid, we just mark the pipe as halted without needing to halt the underlying channel
1725     if (pipe->port->flags.conn_dev_ena //Skip halting the underlying channel if the port is invalid
1726         && !usbh_hal_chan_request_halt(pipe->chan_obj)) {   //Check if the channel is already halted
1727             //Channel is not halted. Request a halt and wait for the channel to be halted.
1728             pipe->cs_flags.waiting_halt = 1;
1729             _internal_pipe_event_wait(pipe);
1730             //State should have been updated in the ISR
1731             assert(pipe->state == HCD_PIPE_STATE_HALTED);
1732     } else {
1733         //We are already halted, just need to update the state
1734         usbh_hal_chan_mark_halted(pipe->chan_obj);
1735         pipe->state = HCD_PIPE_STATE_HALTED;
1736     }
1737     ret = ESP_OK;
1738 exit:
1739     return ret;
1740 }
1741 
1742 static esp_err_t _pipe_cmd_flush(pipe_t *pipe)
1743 {
1744     esp_err_t ret;
1745     //The pipe must be halted in order to be flushed
1746     if (pipe->state != HCD_PIPE_STATE_HALTED) {
1747         ret = ESP_ERR_INVALID_STATE;
1748         goto exit;
1749     }
1750     //If the port is still valid, we are canceling transfers. Otherwise, we are flushing due to a port error
1751     bool canceled = pipe->port->flags.conn_dev_ena;
1752     bool call_pipe_cb;
1753     //Flush any filled buffers
1754     call_pipe_cb = _buffer_flush_all(pipe, canceled);
1755     //Move all URBs from the pending tailq to the done tailq
1756     if (pipe->num_urb_pending > 0) {
1757         //Process all remaining pending URBs
1758         urb_t *urb;
1759         TAILQ_FOREACH(urb, &pipe->pending_urb_tailq, tailq_entry) {
1760             //Update the URB's current state
1761             urb->hcd_var = URB_HCD_STATE_DONE;
1762             //URBs were never executed. Update the actual_num_bytes and status
1763             urb->transfer.actual_num_bytes = 0;
1764             urb->transfer.status = (canceled) ? USB_TRANSFER_STATUS_CANCELED : USB_TRANSFER_STATUS_NO_DEVICE;
1765             if (pipe->ep_char.type == USB_PRIV_XFER_TYPE_ISOCHRONOUS) {
1766                 //Update the URB's isoc packet descriptors as well
1767                 for (int pkt_idx = 0; pkt_idx < urb->transfer.num_isoc_packets; pkt_idx++) {
1768                     urb->transfer.isoc_packet_desc[pkt_idx].actual_num_bytes = 0;
1769                     urb->transfer.isoc_packet_desc[pkt_idx].status = (canceled) ? USB_TRANSFER_STATUS_CANCELED : USB_TRANSFER_STATUS_NO_DEVICE;
1770                 }
1771             }
1772         }
1773         //Concatenate the pending tailq onto the done tailq
1774         TAILQ_CONCAT(&pipe->done_urb_tailq, &pipe->pending_urb_tailq, tailq_entry);
1775         pipe->num_urb_done += pipe->num_urb_pending;
1776         pipe->num_urb_pending = 0;
1777         call_pipe_cb = true;
1778     }
1779     if (call_pipe_cb) {
1780         //One or more URBs can be dequeued as a result of the flush. We need to call the callback
1781         HCD_EXIT_CRITICAL();
1782         pipe->callback((hcd_pipe_handle_t)pipe, HCD_PIPE_EVENT_URB_DONE, pipe->callback_arg, false);
1783         HCD_ENTER_CRITICAL();
1784     }
1785     ret = ESP_OK;
1786 exit:
1787     return ret;
1788 }
1789 
1790 static esp_err_t _pipe_cmd_clear(pipe_t *pipe)
1791 {
1792     esp_err_t ret;
1793     //Pipe must be in the halted state in order to be made active, and there must be an enabled device on the port
1794     if (pipe->state != HCD_PIPE_STATE_HALTED || !pipe->port->flags.conn_dev_ena) {
1795         ret = ESP_ERR_INVALID_STATE;
1796         goto exit;
1797     }
1798     //Update the pipe's state
1799     pipe->state = HCD_PIPE_STATE_ACTIVE;
1800     if (pipe->num_urb_pending > 0) {
1801         //Fill as many buffers as possible
1802         while (_buffer_can_fill(pipe)) {
1803             _buffer_fill(pipe);
1804         }
1805     }
1806     //Execute any filled buffers
1807     if (_buffer_can_exec(pipe)) {
1808         _buffer_exec(pipe);
1809     }
1810     ret = ESP_OK;
1811 exit:
1812     return ret;
1813 }
1814 
1815 // ----------------------- Public --------------------------
1816 
1817 esp_err_t hcd_pipe_alloc(hcd_port_handle_t port_hdl, const hcd_pipe_config_t *pipe_config, hcd_pipe_handle_t *pipe_hdl)
1818 {
1819     HCD_CHECK(port_hdl != NULL && pipe_config != NULL && pipe_hdl != NULL, ESP_ERR_INVALID_ARG);
1820     port_t *port = (port_t *)port_hdl;
1821     HCD_ENTER_CRITICAL();
1822     //Can only allocate a pipe if the target port is initialized and connected to an enabled device
1823     HCD_CHECK_FROM_CRIT(port->initialized && port->flags.conn_dev_ena, ESP_ERR_INVALID_STATE);
1824     usb_speed_t port_speed = port->speed;
1825     const fifo_mps_limits_t *mps_limits = port->fifo_mps_limits;
1826     int pipe_idx = port->num_pipes_idle + port->num_pipes_queued;
1827     HCD_EXIT_CRITICAL();
1828 
1829     usb_transfer_type_t type;
1830     bool is_default;
1831     if (pipe_config->ep_desc == NULL) {
1832         type = USB_TRANSFER_TYPE_CTRL;
1833         is_default = true;
1834     } else {
1835         type = USB_EP_DESC_GET_XFERTYPE(pipe_config->ep_desc);
1836         is_default = false;
1837     }
1838     //Check if pipe configuration can be supported
1839     if (!pipe_alloc_check_args(pipe_config, port_speed, mps_limits, type, is_default)) {
1840         return ESP_ERR_NOT_SUPPORTED;
1841     }
1842 
1843     esp_err_t ret;
1844     //Allocate the pipe resources
1845     pipe_t *pipe = calloc(1, sizeof(pipe_t));
1846     usbh_hal_chan_t *chan_obj = calloc(1, sizeof(usbh_hal_chan_t));
1847     dma_buffer_block_t *buffers[NUM_BUFFERS] = {0};
1848     if (pipe == NULL || chan_obj == NULL) {
1849         ret = ESP_ERR_NO_MEM;
1850         goto err;
1851     }
1852     for (int i = 0; i < NUM_BUFFERS; i++) {
1853         buffers[i] = buffer_block_alloc(type);
1854         if (buffers[i] == NULL) {
1855             ret = ESP_ERR_NO_MEM;
1856             goto err;
1857         }
1858     }
1859 
1860     //Initialize pipe object
1861     TAILQ_INIT(&pipe->pending_urb_tailq);
1862     TAILQ_INIT(&pipe->done_urb_tailq);
1863     for (int i = 0; i < NUM_BUFFERS; i++) {
1864         pipe->buffers[i] = buffers[i];
1865     }
1866     pipe->multi_buffer_control.buffer_num_to_fill = NUM_BUFFERS;
1867     pipe->port = port;
1868     pipe->chan_obj = chan_obj;
1869     usbh_hal_ep_char_t ep_char;
1870     pipe_set_ep_char(pipe_config, type, is_default, pipe_idx, port_speed, &ep_char);
1871     memcpy(&pipe->ep_char, &ep_char, sizeof(usbh_hal_ep_char_t));
1872     pipe->state = HCD_PIPE_STATE_ACTIVE;
1873     pipe->callback = pipe_config->callback;
1874     pipe->callback_arg = pipe_config->callback_arg;
1875     pipe->context = pipe_config->context;
1876 
1877     //Allocate channel
1878     HCD_ENTER_CRITICAL();
1879     if (!port->initialized || !port->flags.conn_dev_ena) {
1880         HCD_EXIT_CRITICAL();
1881         ret = ESP_ERR_INVALID_STATE;
1882         goto err;
1883     }
1884     bool chan_allocated = usbh_hal_chan_alloc(port->hal, pipe->chan_obj, (void *) pipe);
1885     if (!chan_allocated) {
1886         HCD_EXIT_CRITICAL();
1887         ret = ESP_ERR_NOT_SUPPORTED;
1888         goto err;
1889     }
1890     usbh_hal_chan_set_ep_char(port->hal, pipe->chan_obj, &pipe->ep_char);
1891     //Add the pipe to the list of idle pipes in the port object
1892     TAILQ_INSERT_TAIL(&port->pipes_idle_tailq, pipe, tailq_entry);
1893     port->num_pipes_idle++;
1894     HCD_EXIT_CRITICAL();
1895 
1896     *pipe_hdl = (hcd_pipe_handle_t)pipe;
1897     return ESP_OK;
1898 
1899 err:
1900     for (int i = 0; i < NUM_BUFFERS; i++) {
1901         buffer_block_free(buffers[i]);
1902     }
1903     free(chan_obj);
1904     free(pipe);
1905     return ret;
1906 }
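/*
 * Illustrative sketch (assumption): allocating the default control pipe for a
 * newly attached full speed device that still has address 0. Passing
 * ep_desc = NULL selects the default pipe path above (EP0 with the worst case
 * MPS for the device's speed).
 *
 *     hcd_pipe_config_t pipe_config = {
 *         .callback = example_pipe_callback,   //Hypothetical caller-provided callback
 *         .callback_arg = NULL,
 *         .context = NULL,
 *         .ep_desc = NULL,                     //NULL means default control pipe
 *         .dev_speed = USB_SPEED_FULL,
 *         .dev_addr = 0,
 *     };
 *     hcd_pipe_handle_t pipe_hdl;
 *     ESP_ERROR_CHECK(hcd_pipe_alloc(port_hdl, &pipe_config, &pipe_hdl));
 */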
1907 
1908 esp_err_t hcd_pipe_free(hcd_pipe_handle_t pipe_hdl)
1909 {
1910     pipe_t *pipe = (pipe_t *)pipe_hdl;
1911     HCD_ENTER_CRITICAL();
1912     //Check that all URBs have been removed and pipe has no pending events
1913     HCD_CHECK_FROM_CRIT(!pipe->multi_buffer_control.buffer_is_executing
1914                         && !pipe->cs_flags.has_urb
1915                         && !pipe->cs_flags.reset_lock,
1916                         ESP_ERR_INVALID_STATE);
1917     //Remove pipe from the list of idle pipes (it must be in the idle list because it should have no queued URBs)
1918     TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
1919     pipe->port->num_pipes_idle--;
1920     usbh_hal_chan_free(pipe->port->hal, pipe->chan_obj);
1921     HCD_EXIT_CRITICAL();
1922 
1923     //Free pipe resources
1924     for (int i = 0; i < NUM_BUFFERS; i++) {
1925         buffer_block_free(pipe->buffers[i]);
1926     }
1927     free(pipe->chan_obj);
1928     free(pipe);
1929     return ESP_OK;
1930 }
1931 
1932 esp_err_t hcd_pipe_update_mps(hcd_pipe_handle_t pipe_hdl, int mps)
1933 {
1934     pipe_t *pipe = (pipe_t *)pipe_hdl;
1935     HCD_ENTER_CRITICAL();
1936     //Check if pipe is in the correct state to be updated
1937     HCD_CHECK_FROM_CRIT(!pipe->cs_flags.pipe_cmd_processing &&
1938                         !pipe->cs_flags.has_urb &&
1939                         !pipe->cs_flags.reset_lock,
1940                         ESP_ERR_INVALID_STATE);
1941     pipe->ep_char.mps = mps;
1942     //Update the underlying channel's registers
1943     usbh_hal_chan_set_ep_char(pipe->port->hal, pipe->chan_obj, &pipe->ep_char);
1944     HCD_EXIT_CRITICAL();
1945     return ESP_OK;
1946 }
1947 
1948 esp_err_t hcd_pipe_update_dev_addr(hcd_pipe_handle_t pipe_hdl, uint8_t dev_addr)
1949 {
1950     pipe_t *pipe = (pipe_t *)pipe_hdl;
1951     HCD_ENTER_CRITICAL();
1952     //Check if pipe is in the correct state to be updated
1953     HCD_CHECK_FROM_CRIT(!pipe->cs_flags.pipe_cmd_processing &&
1954                         !pipe->cs_flags.has_urb &&
1955                         !pipe->cs_flags.reset_lock,
1956                         ESP_ERR_INVALID_STATE);
1957     pipe->ep_char.dev_addr = dev_addr;
1958     //Update the underlying channel's registers
1959     usbh_hal_chan_set_ep_char(pipe->port->hal, pipe->chan_obj, &pipe->ep_char);
1960     HCD_EXIT_CRITICAL();
1961     return ESP_OK;
1962 }
1963 
1964 esp_err_t hcd_pipe_update_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_callback_t callback, void *user_arg)
1965 {
1966     pipe_t *pipe = (pipe_t *)pipe_hdl;
1967     HCD_ENTER_CRITICAL();
1968     //Check if pipe is in the correct state to be updated
1969     HCD_CHECK_FROM_CRIT(!pipe->cs_flags.pipe_cmd_processing &&
1970                         !pipe->cs_flags.has_urb &&
1971                         !pipe->cs_flags.reset_lock,
1972                         ESP_ERR_INVALID_STATE);
1973     pipe->callback = callback;
1974     pipe->callback_arg = user_arg;
1975     HCD_EXIT_CRITICAL();
1976     return ESP_OK;
1977 }
1978 
1979 esp_err_t hcd_pipe_set_persist_reset(hcd_pipe_handle_t pipe_hdl)
1980 {
1981     pipe_t *pipe = (pipe_t *)pipe_hdl;
1982     HCD_ENTER_CRITICAL();
1983     //Check if pipe is in the correct state to be updated
1984     HCD_CHECK_FROM_CRIT(!pipe->cs_flags.pipe_cmd_processing &&
1985                         !pipe->cs_flags.has_urb &&
1986                         !pipe->cs_flags.reset_lock,
1987                         ESP_ERR_INVALID_STATE);
1988     pipe->cs_flags.persist = 1;
1989     HCD_EXIT_CRITICAL();
1990     return ESP_OK;
1991 }
1992 
1993 void *hcd_pipe_get_context(hcd_pipe_handle_t pipe_hdl)
1994 {
1995     pipe_t *pipe = (pipe_t *)pipe_hdl;
1996     void *ret;
1997     HCD_ENTER_CRITICAL();
1998     ret = pipe->context;
1999     HCD_EXIT_CRITICAL();
2000     return ret;
2001 }
2002 
2003 hcd_pipe_state_t hcd_pipe_get_state(hcd_pipe_handle_t pipe_hdl)
2004 {
2005     hcd_pipe_state_t ret;
2006     pipe_t *pipe = (pipe_t *)pipe_hdl;
2007     HCD_ENTER_CRITICAL();
2008     ret = pipe->state;
2009     HCD_EXIT_CRITICAL();
2010     return ret;
2011 }
2012 
2013 esp_err_t hcd_pipe_command(hcd_pipe_handle_t pipe_hdl, hcd_pipe_cmd_t command)
2014 {
2015     pipe_t *pipe = (pipe_t *)pipe_hdl;
2016     esp_err_t ret = ESP_OK;
2017 
2018     HCD_ENTER_CRITICAL();
2019     //Cannot execute pipe commands if the pipe is already executing a command, or if the pipe or its port is no longer valid
2020     if (pipe->cs_flags.reset_lock) {
2021         ret = ESP_ERR_INVALID_STATE;
2022     } else {
2023         pipe->cs_flags.pipe_cmd_processing = 1;
2024         switch (command) {
2025             case HCD_PIPE_CMD_HALT: {
2026                 ret = _pipe_cmd_halt(pipe);
2027                 break;
2028             }
2029             case HCD_PIPE_CMD_FLUSH: {
2030                 ret = _pipe_cmd_flush(pipe);
2031                 break;
2032             }
2033             case HCD_PIPE_CMD_CLEAR: {
2034                 ret = _pipe_cmd_clear(pipe);
2035                 break;
2036             }
2037         }
2038         pipe->cs_flags.pipe_cmd_processing = 0;
2039     }
2040     HCD_EXIT_CRITICAL();
2041     return ret;
2042 }
2043 
2044 hcd_pipe_event_t hcd_pipe_get_event(hcd_pipe_handle_t pipe_hdl)
2045 {
2046     pipe_t *pipe = (pipe_t *)pipe_hdl;
2047     hcd_pipe_event_t ret;
2048     HCD_ENTER_CRITICAL();
2049     ret = pipe->last_event;
2050     pipe->last_event = HCD_PIPE_EVENT_NONE;
2051     HCD_EXIT_CRITICAL();
2052     return ret;
2053 }
2054 
2055 // ------------------------------------------------- Buffer Control ----------------------------------------------------
2056 
2057 static inline void _buffer_fill_ctrl(dma_buffer_block_t *buffer, usb_transfer_t *transfer)
2058 {
2059     //Get information about the control transfer by analyzing the setup packet (the first 8 bytes of the URB's data)
2060     usb_setup_packet_t *setup_pkt = (usb_setup_packet_t *)transfer->data_buffer;
2061     bool data_stg_in = (setup_pkt->bmRequestType & USB_BM_REQUEST_TYPE_DIR_IN);
2062     bool data_stg_skip = (setup_pkt->wLength == 0);
2063     //Fill setup stage
2064     usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 0, transfer->data_buffer, sizeof(usb_setup_packet_t),
2065                             USBH_HAL_XFER_DESC_FLAG_SETUP | USBH_HAL_XFER_DESC_FLAG_HOC);
2066     //Fill data stage
2067     if (data_stg_skip) {
2068         //No data stage. Fill with an empty descriptor
2069         usbh_hal_xfer_desc_clear(buffer->xfer_desc_list, 1);
2070     } else {
2071         //Fill data stage. Note that we still fill with transfer->num_bytes instead of setup_pkt->wLength as it's possible to require more bytes than wLength
2072         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 1, transfer->data_buffer + sizeof(usb_setup_packet_t), transfer->num_bytes - sizeof(usb_setup_packet_t),
2073                                 ((data_stg_in) ? USBH_HAL_XFER_DESC_FLAG_IN : 0) | USBH_HAL_XFER_DESC_FLAG_HOC);
2074     }
2075     //Fill status stage (i.e., a zero length packet). If data stage is skipped, the status stage is always IN.
2076     usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 2, NULL, 0,
2077                             ((data_stg_in && !data_stg_skip) ? 0 : USBH_HAL_XFER_DESC_FLAG_IN) | USBH_HAL_XFER_DESC_FLAG_HOC);
2078     //Update buffer flags
2079     buffer->flags.ctrl.data_stg_in = data_stg_in;
2080     buffer->flags.ctrl.data_stg_skip = data_stg_skip;
2081     buffer->flags.ctrl.cur_stg = 0;
2082 }
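/*
 * Worked example (illustrative): a GET_DESCRIPTOR(Device) control transfer with
 * wLength = 18 and transfer->num_bytes = 26 (8 byte setup packet + 18 byte data
 * area) is laid out in the descriptor list as follows:
 * - Descriptor 0: setup stage, 8 bytes, OUT (SETUP flag), HOC
 * - Descriptor 1: data stage, 18 bytes, IN, HOC
 * - Descriptor 2: status stage, zero length, OUT (opposite of the IN data stage), HOC
 * If wLength were 0, descriptor 1 would be cleared and the status stage would be IN.
 */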
2083 
2084 static inline void _buffer_fill_bulk(dma_buffer_block_t *buffer, usb_transfer_t *transfer, bool is_in, int mps)
2085 {
2086     //Only add a zero length packet if OUT, flag is set, and transfer length is multiple of EP's MPS
2087     //Minor optimization: Do the mod operation last
2088     bool zero_len_packet = !is_in && (transfer->flags & USB_TRANSFER_FLAG_ZERO_PACK) && (transfer->num_bytes % mps == 0);
2089     if (is_in) {
2090         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 0, transfer->data_buffer, transfer->num_bytes,
2091                                 USBH_HAL_XFER_DESC_FLAG_IN | USBH_HAL_XFER_DESC_FLAG_HOC);
2092     } else { //OUT
2093         if (zero_len_packet) {
2094             //Adding a zero length packet, so two descriptors are used.
2095             usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 0, transfer->data_buffer, transfer->num_bytes, 0);
2096             usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 1, NULL, 0, USBH_HAL_XFER_DESC_FLAG_HOC);
2097         } else {
2098             //Zero length packet not required. One descriptor is enough
2099             usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, 0, transfer->data_buffer, transfer->num_bytes, USBH_HAL_XFER_DESC_FLAG_HOC);
2100         }
2101     }
2102     //Update buffer flags
2103     buffer->flags.bulk.zero_len_packet = zero_len_packet;
2104 }
2105 
2106 static inline void _buffer_fill_intr(dma_buffer_block_t *buffer, usb_transfer_t *transfer, bool is_in, int mps)
2107 {
2108     int num_qtds;
2109     int mod_mps = transfer->num_bytes % mps;
2110     //Only add a zero length packet if OUT, flag is set, and transfer length is multiple of EP's MPS
2111     bool zero_len_packet = !is_in && (transfer->flags & USB_TRANSFER_FLAG_ZERO_PACK) && (mod_mps == 0);
2112     if (is_in) {
2113         assert(mod_mps == 0);  //IN transfers MUST be integer multiple of MPS
2114         num_qtds = transfer->num_bytes / mps;   //Can just floor divide as it's already multiple of MPS
2115     } else {
2116         num_qtds = transfer->num_bytes / mps;   //Floor division to get the number of MPS sized packets
2117         if (mod_mps > 0) {
2118             num_qtds++; //Add a short packet for the remainder
2119         }
2120     }
2121     assert(((zero_len_packet) ? num_qtds + 1 : num_qtds) <= XFER_LIST_LEN_INTR); //Check that the number of QTDs doesn't exceed the QTD list's length
2122 
2123     uint32_t xfer_desc_flags = (is_in) ? USBH_HAL_XFER_DESC_FLAG_IN : 0;
2124     int bytes_filled = 0;
2125     //Fill all but last QTD
2126     for (int i = 0; i < num_qtds - 1; i++) {
2127         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, i, &transfer->data_buffer[bytes_filled], mps, xfer_desc_flags);
2128         bytes_filled += mps;
2129     }
2130     //Fill last QTD and zero length packet
2131     if (zero_len_packet) {
2132         //Fill in last data packet without HOC flag
2133         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, num_qtds - 1, &transfer->data_buffer[bytes_filled], transfer->num_bytes - bytes_filled,
2134                                 xfer_desc_flags);
2135         //HOC flag goes to zero length packet instead
2136         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, num_qtds, NULL, 0, USBH_HAL_XFER_DESC_FLAG_HOC);
2137     } else {
2138         //Zero length packet not required. Fill in last QTD with HOC flag
2139         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, num_qtds - 1, &transfer->data_buffer[bytes_filled], transfer->num_bytes - bytes_filled,
2140                                 xfer_desc_flags | USBH_HAL_XFER_DESC_FLAG_HOC);
2141     }
2142 
2143     //Update buffer members and flags
2144     buffer->flags.intr.num_qtds = num_qtds;
2145     buffer->flags.intr.zero_len_packet = zero_len_packet;
2146 }
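/*
 * Worked example (illustrative): an OUT interrupt transfer with mps = 64 and
 * transfer->num_bytes = 150 produces num_qtds = 3:
 * - Descriptor 0: 64 bytes
 * - Descriptor 1: 64 bytes
 * - Descriptor 2: 22 bytes, HOC
 * With num_bytes = 128 and USB_TRANSFER_FLAG_ZERO_PACK set, num_qtds = 2 and a
 * third zero length descriptor carries the HOC flag instead.
 */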
2147 
2148 static inline void _buffer_fill_isoc(dma_buffer_block_t *buffer, usb_transfer_t *transfer, bool is_in, int mps, int interval, int start_idx)
2149 {
2150     assert(interval > 0);
2151     int total_num_desc = transfer->num_isoc_packets * interval;
2152     assert(total_num_desc <= XFER_LIST_LEN_ISOC);
2153     int desc_idx = start_idx;
2154     int bytes_filled = 0;
2155     //For each packet, fill in a descriptor followed by interval-1 blank descriptors
2156     for (int pkt_idx = 0; pkt_idx < transfer->num_isoc_packets; pkt_idx++) {
2157         int xfer_len = transfer->isoc_packet_desc[pkt_idx].num_bytes;
2158         uint32_t flags = (is_in) ? USBH_HAL_XFER_DESC_FLAG_IN : 0;
2159         if (pkt_idx == transfer->num_isoc_packets - 1) {
2160             //Last packet, set the HOC flag
2161             flags |= USBH_HAL_XFER_DESC_FLAG_HOC;
2162         }
2163         usbh_hal_xfer_desc_fill(buffer->xfer_desc_list, desc_idx, &transfer->data_buffer[bytes_filled], xfer_len, flags);
2164         bytes_filled += xfer_len;
2165         if (++desc_idx >= XFER_LIST_LEN_ISOC) {
2166             desc_idx = 0;
2167         }
2168         //Clear descriptors for unscheduled frames
2169         for (int i = 0; i < interval - 1; i++) {
2170             usbh_hal_xfer_desc_clear(buffer->xfer_desc_list, desc_idx);
2171             if (++desc_idx >= XFER_LIST_LEN_ISOC) {
2172                 desc_idx = 0;
2173             }
2174         }
2175     }
2176     //Update buffer members and flags
2177     buffer->flags.isoc.num_qtds = total_num_desc;
2178     buffer->flags.isoc.interval = interval;
2179     buffer->flags.isoc.start_idx = start_idx;
2180     buffer->flags.isoc.next_start_idx = desc_idx;
2181 }
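/*
 * Worked example (illustrative): an IN isochronous transfer with interval = 4,
 * start_idx = 0, and two packets fills the descriptor list as follows:
 * - Descriptor 0: packet 0 (IN)
 * - Descriptors 1-3: cleared (unscheduled frames)
 * - Descriptor 4: packet 1 (IN, HOC)
 * - Descriptors 5-7: cleared (unscheduled frames)
 * next_start_idx is left at 8, where the next buffer would continue.
 */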
2182 
2183 static void _buffer_fill(pipe_t *pipe)
2184 {
2185     //Get an URB from the pending tailq
2186     urb_t *urb = TAILQ_FIRST(&pipe->pending_urb_tailq);
2187     assert(pipe->num_urb_pending > 0 && urb != NULL);
2188     TAILQ_REMOVE(&pipe->pending_urb_tailq, urb, tailq_entry);
2189     pipe->num_urb_pending--;
2190 
2191     //Select the inactive buffer
2192     assert(pipe->multi_buffer_control.buffer_num_to_exec <= NUM_BUFFERS);
2193     dma_buffer_block_t *buffer_to_fill = pipe->buffers[pipe->multi_buffer_control.wr_idx];
2194     buffer_to_fill->status_flags.val = 0;   //Clear the buffer's status flags
2195     assert(buffer_to_fill->urb == NULL);
2196     bool is_in = pipe->ep_char.bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK;
2197     int mps = pipe->ep_char.mps;
2198     usb_transfer_t *transfer = &urb->transfer;
2199     switch (pipe->ep_char.type) {
2200         case USB_PRIV_XFER_TYPE_CTRL: {
2201             _buffer_fill_ctrl(buffer_to_fill, transfer);
2202             break;
2203         }
2204         case USB_PRIV_XFER_TYPE_ISOCHRONOUS: {
2205             uint32_t start_idx;
2206             if (pipe->multi_buffer_control.buffer_num_to_exec == 0) {
2207                 //There are no more previously filled buffers to execute. We need to calculate a new start index based on HFNUM and the pipe's schedule
2208                 uint32_t cur_frame_num = usbh_hal_port_get_cur_frame_num(pipe->port->hal);
2209                 uint32_t cur_mod_idx_no_offset = (cur_frame_num - pipe->ep_char.periodic.phase_offset_frames) & (XFER_LIST_LEN_ISOC - 1);    //Get the modulated index (i.e., the Nth desc in the descriptor list)
2210                 //This is the non-offset modulated QTD index of the last scheduled interval
2211                 uint32_t last_interval_mod_idx_no_offset = (cur_mod_idx_no_offset / pipe->ep_char.periodic.interval) * pipe->ep_char.periodic.interval; //Floor divide then multiply again
2212                 uint32_t next_interval_idx_no_offset = (last_interval_mod_idx_no_offset + pipe->ep_char.periodic.interval);
2213                 //We want at least a half interval or 2 frames of buffer space
2214                 if (next_interval_idx_no_offset - cur_mod_idx_no_offset > (pipe->ep_char.periodic.interval / 2)
2215                     && next_interval_idx_no_offset - cur_mod_idx_no_offset >= 2) {
2216                         start_idx = (next_interval_idx_no_offset + pipe->ep_char.periodic.phase_offset_frames) & (XFER_LIST_LEN_ISOC - 1);
2217                 } else {
2218                     //Not enough time until the next schedule, add another interval to it.
2219                         start_idx =  (next_interval_idx_no_offset + pipe->ep_char.periodic.interval + pipe->ep_char.periodic.phase_offset_frames) & (XFER_LIST_LEN_ISOC - 1);
2220                 }
2221             } else {
2222                 //Start index is based on previously filled buffer
2223                 uint32_t prev_buffer_idx = (pipe->multi_buffer_control.wr_idx - 1) & (NUM_BUFFERS - 1);
2224                 dma_buffer_block_t *prev_filled_buffer = pipe->buffers[prev_buffer_idx];
2225                 start_idx = prev_filled_buffer->flags.isoc.next_start_idx;
2226             }
2227             _buffer_fill_isoc(buffer_to_fill, transfer, is_in, mps, (int)pipe->ep_char.periodic.interval, start_idx);
2228             break;
2229         }
2230         case USB_PRIV_XFER_TYPE_BULK: {
2231             _buffer_fill_bulk(buffer_to_fill, transfer, is_in, mps);
2232             break;
2233         }
2234         case USB_PRIV_XFER_TYPE_INTR: {
2235             _buffer_fill_intr(buffer_to_fill, transfer, is_in, mps);
2236             break;
2237         }
2238         default: {
2239             abort();
2240             break;
2241         }
2242     }
2243     buffer_to_fill->urb = urb;
2244     urb->hcd_var = URB_HCD_STATE_INFLIGHT;
2245     //Update multi buffer flags
2246     pipe->multi_buffer_control.wr_idx++;
2247     pipe->multi_buffer_control.buffer_num_to_fill--;
2248     pipe->multi_buffer_control.buffer_num_to_exec++;
2249 }
2250 
2251 static void _buffer_exec(pipe_t *pipe)
2252 {
2253     assert(pipe->multi_buffer_control.rd_idx != pipe->multi_buffer_control.wr_idx || pipe->multi_buffer_control.buffer_num_to_exec > 0);
2254     dma_buffer_block_t *buffer_to_exec = pipe->buffers[pipe->multi_buffer_control.rd_idx];
2255     assert(buffer_to_exec->urb != NULL);
2256 
2257     uint32_t start_idx;
2258     int desc_list_len;
2259     switch (pipe->ep_char.type) {
2260         case USB_PRIV_XFER_TYPE_CTRL: {
2261             start_idx = 0;
2262             desc_list_len = XFER_LIST_LEN_CTRL;
2263             //Set the channel's direction to OUT and its PID to 0 for the setup stage
2264             usbh_hal_chan_set_dir(pipe->chan_obj, false);   //Setup stage is always OUT
2265             usbh_hal_chan_set_pid(pipe->chan_obj, 0);   //Setup stage always has a PID of DATA0
2266             break;
2267         }
2268         case USB_PRIV_XFER_TYPE_ISOCHRONOUS: {
2269             start_idx = buffer_to_exec->flags.isoc.start_idx;
2270             desc_list_len = XFER_LIST_LEN_ISOC;
2271             break;
2272         }
2273         case USB_PRIV_XFER_TYPE_BULK: {
2274             start_idx = 0;
2275             desc_list_len = (buffer_to_exec->flags.bulk.zero_len_packet) ? XFER_LIST_LEN_BULK : 1;
2276             break;
2277         }
2278         case USB_PRIV_XFER_TYPE_INTR: {
2279             start_idx = 0;
2280             desc_list_len = (buffer_to_exec->flags.intr.zero_len_packet) ? buffer_to_exec->flags.intr.num_qtds + 1 : buffer_to_exec->flags.intr.num_qtds;
2281             break;
2282         }
2283         default: {
2284             start_idx = 0;
2285             desc_list_len = 0;
2286             abort();
2287             break;
2288         }
2289     }
2290     //Update buffer and multi buffer flags
2291     buffer_to_exec->status_flags.executing = 1;
2292     pipe->multi_buffer_control.buffer_is_executing = 1;
2293     usbh_hal_chan_activate(pipe->chan_obj, buffer_to_exec->xfer_desc_list, desc_list_len, start_idx);
2294 }
2295 
2296 static void _buffer_exec_cont(pipe_t *pipe)
2297 {
2298     //This should only ever be called on control transfers
2299     assert(pipe->ep_char.type == USB_PRIV_XFER_TYPE_CTRL);
2300     dma_buffer_block_t *buffer_inflight = pipe->buffers[pipe->multi_buffer_control.rd_idx];
2301     bool next_dir_is_in;
2302     int next_pid;
2303     assert(buffer_inflight->flags.ctrl.cur_stg != 2);
2304     if (buffer_inflight->flags.ctrl.cur_stg == 0) { //Just finished control stage
2305         if (buffer_inflight->flags.ctrl.data_stg_skip) {
2306             //Skipping data stage. Go straight to status stage
2307             next_dir_is_in = true;     //With no data stage, status stage must be IN
2308             next_pid = 1;       //Status stage always has a PID of DATA1
2309             buffer_inflight->flags.ctrl.cur_stg = 2;    //Skip over the null descriptor representing the skipped data stage
2310         } else {
2311             //Go to data stage
2312             next_dir_is_in = buffer_inflight->flags.ctrl.data_stg_in;
2313             next_pid = 1;   //Data stage always starts with a PID of DATA1
2314             buffer_inflight->flags.ctrl.cur_stg = 1;
2315         }
2316     } else {    //cur_stg == 1: Just finished data stage. Go to status stage
2317         next_dir_is_in = !buffer_inflight->flags.ctrl.data_stg_in;  //Status stage is always the opposite direction of data stage
2318         next_pid = 1;   //Status stage always has a PID of DATA1
2319         buffer_inflight->flags.ctrl.cur_stg = 2;
2320     }
2321     //Continue the control transfer
2322     usbh_hal_chan_set_dir(pipe->chan_obj, next_dir_is_in);
2323     usbh_hal_chan_set_pid(pipe->chan_obj, next_pid);
2324     usbh_hal_chan_activate(pipe->chan_obj, buffer_inflight->xfer_desc_list, XFER_LIST_LEN_CTRL, buffer_inflight->flags.ctrl.cur_stg);
2325 }
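/*
 * Stage progression handled above (summary): _buffer_exec() starts the setup
 * stage with direction OUT and PID DATA0. For a control transfer with an IN
 * data stage the continuations are:
 * - cur_stg 0 -> 1: data stage, direction IN, PID DATA1
 * - cur_stg 1 -> 2: status stage, direction OUT, PID DATA1
 * If the data stage is skipped (wLength == 0), the transfer goes straight from
 * cur_stg 0 to 2 with an IN status stage and PID DATA1.
 */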
2326 
2327 static inline void _buffer_parse_ctrl(dma_buffer_block_t *buffer)
2328 {
2329     usb_transfer_t *transfer = &buffer->urb->transfer;
2330     //Update URB's actual number of bytes
2331     if (buffer->flags.ctrl.data_stg_skip)     {
2332         //There was no data stage. Just set the actual length to the size of the setup packet
2333         transfer->actual_num_bytes = sizeof(usb_setup_packet_t);
2334     } else {
2335         //Parse the data stage for the remaining length
2336         int rem_len;
2337         int desc_status;
2338         usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, 1, &rem_len, &desc_status);
2339         assert(desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2340         assert(rem_len <= (transfer->num_bytes - sizeof(usb_setup_packet_t)));
2341         transfer->actual_num_bytes = transfer->num_bytes - rem_len;
2342     }
2343     //Update URB status
2344     transfer->status = USB_TRANSFER_STATUS_COMPLETED;
2345     //Clear the descriptor list
2346     memset(buffer->xfer_desc_list, 0, XFER_LIST_LEN_CTRL * sizeof(usbh_ll_dma_qtd_t));
2347 }
2348 
2349 static inline void _buffer_parse_bulk(dma_buffer_block_t *buffer)
2350 {
2351     usb_transfer_t *transfer = &buffer->urb->transfer;
2352     //Update URB's actual number of bytes
2353     int rem_len;
2354     int desc_status;
2355     usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, 0, &rem_len, &desc_status);
2356     assert(desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2357     assert(rem_len <= transfer->num_bytes);
2358     transfer->actual_num_bytes = transfer->num_bytes - rem_len;
2359     //Update URB's status
2360     transfer->status = USB_TRANSFER_STATUS_COMPLETED;
2361     //Clear the descriptor list
2362     memset(buffer->xfer_desc_list, 0, XFER_LIST_LEN_BULK * sizeof(usbh_ll_dma_qtd_t));
2363 }
2364 
2365 static inline void _buffer_parse_intr(dma_buffer_block_t *buffer, bool is_in, int mps)
2366 {
2367     usb_transfer_t *transfer = &buffer->urb->transfer;
2368     int intr_stop_idx = buffer->status_flags.stop_idx;
2369     if (is_in) {
2370         if (intr_stop_idx > 0) { //This is an early stop (short packet)
2371             assert(intr_stop_idx <= buffer->flags.intr.num_qtds);
2372             int rem_len;
2373             int desc_status;
2374             for (int i = 0; i < intr_stop_idx - 1; i++) {    //Check all packets before the short
2375                 usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, i, &rem_len, &desc_status);
2376                 assert(rem_len == 0 && desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2377             }
2378             //Check the short packet
2379             usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, intr_stop_idx - 1, &rem_len, &desc_status);
2380             assert(rem_len > 0 && desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2381             //Update actual bytes: (intr_stop_idx - 1) full MPS packets plus the short packet
2382             transfer->actual_num_bytes = (mps * (intr_stop_idx - 1)) + (mps - rem_len);
2383         } else {
2384             //Check that all but the last packet transmitted MPS
2385             for (int i = 0; i < buffer->flags.intr.num_qtds - 1; i++) {
2386                 int rem_len;
2387                 int desc_status;
2388                 usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, i, &rem_len, &desc_status);
2389                 assert(rem_len == 0 && desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2390             }
2391             //Check the last packet
2392             int last_packet_rem_len;
2393             int last_packet_desc_status;
2394             usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, buffer->flags.intr.num_qtds - 1, &last_packet_rem_len, &last_packet_desc_status);
2395             assert(last_packet_desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2396             //All packets except last MUST be MPS. So just deduct the remaining length of the last packet to get actual number of bytes
2397             transfer->actual_num_bytes = transfer->num_bytes - last_packet_rem_len;
2398         }
2399     } else {
2400         //OUT INTR transfers can only complete successfully if all packets have been transmitted. Double check
2401         for (int i = 0 ; i < buffer->flags.intr.num_qtds; i++) {
2402             int rem_len;
2403             int desc_status;
2404             usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, i, &rem_len, &desc_status);
2405             assert(rem_len == 0 && desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS);
2406         }
2407         transfer->actual_num_bytes = transfer->num_bytes;
2408     }
2409     //Update URB's status
2410     transfer->status = USB_TRANSFER_STATUS_COMPLETED;
2411     //Clear the descriptor list
2412     memset(buffer->xfer_desc_list, 0, XFER_LIST_LEN_INTR * sizeof(usbh_ll_dma_qtd_t));
2413 }
2414 
2415 static inline void _buffer_parse_isoc(dma_buffer_block_t *buffer, bool is_in)
2416 {
2417     usb_transfer_t *transfer = &buffer->urb->transfer;
2418     int desc_idx = buffer->flags.isoc.start_idx;    //Descriptor index tracks which descriptor in the QTD list
2419     int total_actual_num_bytes = 0;
2420     for (int pkt_idx = 0; pkt_idx < transfer->num_isoc_packets; pkt_idx++) {
2421         //Clear the filled descriptor
2422         int rem_len;
2423         int desc_status;
2424         usbh_hal_xfer_desc_parse(buffer->xfer_desc_list, desc_idx, &rem_len, &desc_status);
2425         usbh_hal_xfer_desc_clear(buffer->xfer_desc_list, desc_idx);
2426         assert(rem_len == 0 || is_in);
2427         assert(desc_status == USBH_HAL_XFER_DESC_STS_SUCCESS || desc_status == USBH_HAL_XFER_DESC_STS_NOT_EXECUTED);
2428         assert(rem_len <= transfer->isoc_packet_desc[pkt_idx].num_bytes);    //Check for DMA errata
2429         //Update ISO packet actual length and status
2430         transfer->isoc_packet_desc[pkt_idx].actual_num_bytes = transfer->isoc_packet_desc[pkt_idx].num_bytes - rem_len;
2431         total_actual_num_bytes += transfer->isoc_packet_desc[pkt_idx].actual_num_bytes;
2432         transfer->isoc_packet_desc[pkt_idx].status = (desc_status == USBH_HAL_XFER_DESC_STS_NOT_EXECUTED) ? USB_TRANSFER_STATUS_SKIPPED : USB_TRANSFER_STATUS_COMPLETED;
2433         //A descriptor is also allocated for unscheduled frames. We need to skip over them
2434         desc_idx += buffer->flags.isoc.interval;
2435         if (desc_idx >= XFER_LIST_LEN_ISOC) {
2436             desc_idx -= XFER_LIST_LEN_ISOC;
2437         }
2438     }
2439     //Write back the actual_num_bytes and status of the entire transfer
2440     assert(total_actual_num_bytes <= transfer->num_bytes);
2441     transfer->actual_num_bytes = total_actual_num_bytes;
2442     transfer->status = USB_TRANSFER_STATUS_COMPLETED;
2443 }
2444 
2445 static inline void _buffer_parse_error(dma_buffer_block_t *buffer)
2446 {
2447     //The URB had an error in one of its packets (or a port error), so we mark the entire URB as failed
2448     usb_transfer_t *transfer = &buffer->urb->transfer;
2449     transfer->actual_num_bytes = 0;
2450     //Update the overall status of URB. Status will depend on the pipe_event
2451     switch (buffer->status_flags.pipe_event) {
2452         case HCD_PIPE_EVENT_NONE:
2453             transfer->status = (buffer->status_flags.was_canceled) ? USB_TRANSFER_STATUS_CANCELED : USB_TRANSFER_STATUS_NO_DEVICE;
2454             break;
2455         case HCD_PIPE_EVENT_ERROR_XFER:
2456             transfer->status = USB_TRANSFER_STATUS_ERROR;
2457             break;
2458         case HCD_PIPE_EVENT_ERROR_OVERFLOW:
2459             transfer->status = USB_TRANSFER_STATUS_OVERFLOW;
2460             break;
2461         case HCD_PIPE_EVENT_ERROR_STALL:
2462             transfer->status = USB_TRANSFER_STATUS_STALL;
2463             break;
2464         default:
2465             //HCD_PIPE_EVENT_URB_DONE and HCD_PIPE_EVENT_ERROR_URB_NOT_AVAIL should not occur here
2466             abort();
2467             break;
2468     }
2469 }
2470 
2471 static void _buffer_parse(pipe_t *pipe)
2472 {
2473     assert(pipe->multi_buffer_control.buffer_num_to_parse > 0);
2474     dma_buffer_block_t *buffer_to_parse = pipe->buffers[pipe->multi_buffer_control.fr_idx];
2475     assert(buffer_to_parse->urb != NULL);
2476     bool is_in = pipe->ep_char.bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK;
2477     int mps = pipe->ep_char.mps;
2478 
2479     //Parsing the buffer will update the buffer's corresponding URB
2480     if (buffer_to_parse->status_flags.pipe_event == HCD_PIPE_EVENT_URB_DONE) {
2481         //URB was successful
2482         switch (pipe->ep_char.type) {
2483             case USB_PRIV_XFER_TYPE_CTRL: {
2484                 _buffer_parse_ctrl(buffer_to_parse);
2485                 break;
2486             }
2487             case USB_PRIV_XFER_TYPE_ISOCHRONOUS: {
2488                 _buffer_parse_isoc(buffer_to_parse, is_in);
2489                 break;
2490             }
2491             case USB_PRIV_XFER_TYPE_BULK: {
2492                 _buffer_parse_bulk(buffer_to_parse);
2493                 break;
2494             }
2495             case USB_PRIV_XFER_TYPE_INTR: {
2496                 _buffer_parse_intr(buffer_to_parse, is_in, mps);
2497                 break;
2498             }
2499             default: {
2500                 abort();
2501                 break;
2502             }
2503         }
2504     } else {
2505         //URB failed
2506         _buffer_parse_error(buffer_to_parse);
2507     }
2508     urb_t *urb = buffer_to_parse->urb;
2509     urb->hcd_var = URB_HCD_STATE_DONE;
2510     buffer_to_parse->urb = NULL;
2511     buffer_to_parse->flags.val = 0; //Clear flags
2512     //Move the URB to the done tailq
2513     TAILQ_INSERT_TAIL(&pipe->done_urb_tailq, urb, tailq_entry);
2514     pipe->num_urb_done++;
2515     //Update multi buffer flags
2516     pipe->multi_buffer_control.fr_idx++;
2517     pipe->multi_buffer_control.buffer_num_to_parse--;
2518     pipe->multi_buffer_control.buffer_num_to_fill++;
2519 }
2520 
2521 static bool _buffer_flush_all(pipe_t *pipe, bool canceled)
2522 {
2523     int cur_num_to_mark_done = pipe->multi_buffer_control.buffer_num_to_exec;
2524     for (int i = 0; i < cur_num_to_mark_done; i++) {
2525         //Mark any filled buffers as done
2526         _buffer_done(pipe, 0, HCD_PIPE_EVENT_NONE, canceled);
2527     }
2528     int cur_num_to_parse = pipe->multi_buffer_control.buffer_num_to_parse;
2529     for (int i = 0; i < cur_num_to_parse; i++) {
2530         _buffer_parse(pipe);
2531     }
2532     //At this point, there should be no more filled buffers. Only URBs in the pending or done tailq
2533     return (cur_num_to_parse > 0);
2534 }

// ---------------------------------------------- HCD Transfer Descriptors ---------------------------------------------

// ----------------------- Public --------------------------

esp_err_t hcd_urb_enqueue(hcd_pipe_handle_t pipe_hdl, urb_t *urb)
{
    //Check that the URB has not already been enqueued
    HCD_CHECK(urb->hcd_ptr == NULL && urb->hcd_var == URB_HCD_STATE_IDLE, ESP_ERR_INVALID_STATE);
    pipe_t *pipe = (pipe_t *)pipe_hdl;

    HCD_ENTER_CRITICAL();
    //Check that pipe and port are in the correct state to receive URBs
    HCD_CHECK_FROM_CRIT(pipe->port->state == HCD_PORT_STATE_ENABLED         //The pipe's port must be in the correct state
                        && pipe->state == HCD_PIPE_STATE_ACTIVE             //The pipe must be in the correct state
                        && !pipe->cs_flags.pipe_cmd_processing              //Pipe cannot currently be processing a pipe command
                        && !pipe->cs_flags.reset_lock,                      //Pipe cannot be persisting through a port reset
                        ESP_ERR_INVALID_STATE);
    //Use the URB's reserved hcd_ptr field to store a pointer to the owning pipe
    urb->hcd_ptr = (void *)pipe;
    //Use the URB's reserved hcd_var field to track the URB's current state, then add it to the pipe's pending tailq
    urb->hcd_var = URB_HCD_STATE_PENDING;
    TAILQ_INSERT_TAIL(&pipe->pending_urb_tailq, urb, tailq_entry);
    pipe->num_urb_pending++;
    //Fill and execute a transfer buffer for the newly enqueued URB if one is available
    if (_buffer_can_fill(pipe)) {
        _buffer_fill(pipe);
    }
    if (_buffer_can_exec(pipe)) {
        _buffer_exec(pipe);
    }
    if (!pipe->cs_flags.has_urb) {
        //This is the first URB to be enqueued into the pipe. Move the pipe to the list of active pipes
        TAILQ_REMOVE(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
        TAILQ_INSERT_TAIL(&pipe->port->pipes_active_tailq, pipe, tailq_entry);
        pipe->port->num_pipes_idle--;
        pipe->port->num_pipes_queued++;
        pipe->cs_flags.has_urb = 1;
    }
    HCD_EXIT_CRITICAL();
    return ESP_OK;
}
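
/*
 * Minimal caller sketch (illustrative only, not part of this driver): a client
 * that has already initialized the port, opened a pipe, and allocated a urb_t
 * would typically fill in the transfer and enqueue it, then reclaim completed
 * URBs from its pipe callback context. The pipe/URB setup steps are assumed to
 * have happened elsewhere.
 *
 *   urb->transfer.num_bytes = len;                      //Describe the transfer before enqueueing
 *   ESP_ERROR_CHECK(hcd_urb_enqueue(pipe_hdl, urb));
 *   ...
 *   //On HCD_PIPE_EVENT_URB_DONE, dequeue completed URBs (see hcd_urb_dequeue() below)
 */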

urb_t *hcd_urb_dequeue(hcd_pipe_handle_t pipe_hdl)
{
    pipe_t *pipe = (pipe_t *)pipe_hdl;
    urb_t *urb;

    HCD_ENTER_CRITICAL();
    if (pipe->num_urb_done > 0) {
        urb = TAILQ_FIRST(&pipe->done_urb_tailq);
        TAILQ_REMOVE(&pipe->done_urb_tailq, urb, tailq_entry);
        pipe->num_urb_done--;
        //Check the URB's reserved fields then reset them
        assert(urb->hcd_ptr == (void *)pipe && urb->hcd_var == URB_HCD_STATE_DONE);  //The URB's reserved field should have been set to this pipe
        urb->hcd_ptr = NULL;
        urb->hcd_var = URB_HCD_STATE_IDLE;
        if (pipe->cs_flags.has_urb
            && pipe->num_urb_pending == 0 && pipe->num_urb_done == 0
            && pipe->multi_buffer_control.buffer_num_to_exec == 0 && pipe->multi_buffer_control.buffer_num_to_parse == 0) {
            //This pipe has no more enqueued URBs. Move the pipe to the list of idle pipes
            TAILQ_REMOVE(&pipe->port->pipes_active_tailq, pipe, tailq_entry);
            TAILQ_INSERT_TAIL(&pipe->port->pipes_idle_tailq, pipe, tailq_entry);
            pipe->port->num_pipes_idle++;
            pipe->port->num_pipes_queued--;
            pipe->cs_flags.has_urb = 0;
        }
    } else {
        //No more URBs to dequeue from this pipe
        urb = NULL;
    }
    HCD_EXIT_CRITICAL();
    return urb;
}
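
/*
 * Illustrative dequeue pattern (a sketch, not part of this driver): after a
 * pipe callback reports HCD_PIPE_EVENT_URB_DONE, a client would typically
 * drain all completed URBs in a loop, since more than one URB may have
 * finished by the time the callback runs.
 *
 *   urb_t *done_urb;
 *   while ((done_urb = hcd_urb_dequeue(pipe_hdl)) != NULL) {
 *       //Inspect done_urb->transfer.status and done_urb->transfer.actual_num_bytes here
 *   }
 */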

esp_err_t hcd_urb_abort(urb_t *urb)
{
    HCD_ENTER_CRITICAL();
    //Check that the URB was enqueued to begin with
    HCD_CHECK_FROM_CRIT(urb->hcd_ptr != NULL && urb->hcd_var != URB_HCD_STATE_IDLE, ESP_ERR_INVALID_STATE);
    if (urb->hcd_var == URB_HCD_STATE_PENDING) {
        //URB has not been executed yet, so it can be aborted
        pipe_t *pipe = (pipe_t *)urb->hcd_ptr;
        //Remove it from the pending queue
        TAILQ_REMOVE(&pipe->pending_urb_tailq, urb, tailq_entry);
        pipe->num_urb_pending--;
        //Add it to the done queue
        TAILQ_INSERT_TAIL(&pipe->done_urb_tailq, urb, tailq_entry);
        pipe->num_urb_done++;
        //Update the URB's current state, status, and actual length
        urb->hcd_var = URB_HCD_STATE_DONE;
        if (urb->transfer.num_isoc_packets == 0) {
            urb->transfer.actual_num_bytes = 0;
            urb->transfer.status = USB_TRANSFER_STATUS_CANCELED;
        } else {
            //If this is an ISOC URB, update the ISOC packet descriptors instead
            for (int i = 0; i < urb->transfer.num_isoc_packets; i++) {
                urb->transfer.isoc_packet_desc[i].actual_num_bytes = 0;
                urb->transfer.isoc_packet_desc[i].status = USB_TRANSFER_STATUS_CANCELED;
            }
        }
    }   //Otherwise, the URB is in-flight or already done, so it cannot be aborted
    HCD_EXIT_CRITICAL();
    return ESP_OK;
}
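
/*
 * Usage note (illustrative, not part of this driver): hcd_urb_abort() only
 * cancels a URB that is still waiting in the pending queue; an in-flight or
 * already completed URB is left untouched and the call still returns ESP_OK.
 * A canceled URB is moved to the done queue with USB_TRANSFER_STATUS_CANCELED
 * and must still be reclaimed with hcd_urb_dequeue(). Reclaiming URBs that are
 * already in-flight additionally requires halting and flushing the pipe through
 * the pipe command interface declared in hcd.h.
 */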