/**************************************************************************/
/*                                                                        */
/*       Copyright (c) Microsoft Corporation. All rights reserved.        */
/*                                                                        */
/*       This software is licensed under the Microsoft Software License   */
/*       Terms for Microsoft Azure RTOS. Full text of the license can be  */
/*       found in the LICENSE file at https://aka.ms/AzureRTOS_EULA       */
/*       and in the root directory of this software.                      */
/*                                                                        */
/**************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** USBX Component                                                        */
/**                                                                       */
/**   EHCI Controller Driver                                              */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/


/* Include necessary system files.  */

#define UX_SOURCE_CODE

#include "ux_api.h"
#include "ux_hcd_ehci.h"
#include "ux_host_stack.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _ux_hcd_ehci_isochronous_endpoint_create             PORTABLE C     */
/*                                                           6.1.11       */
/*  AUTHOR                                                                */
/*                                                                        */
/*    Chaoqiong Xiao, Microsoft Corporation                               */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function will create an isochronous endpoint.                  */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    hcd_ehci                              Pointer to EHCI controller    */
/*    endpoint                              Pointer to endpoint           */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    Completion Status                                                   */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    _ux_utility_memory_allocate           Allocate memory               */
/*    _ux_utility_memory_free               Free memory                   */
/*    _ux_hcd_ehci_hsisochronous_td_obtain  Obtain a TD                   */
/*    _ux_hcd_ehci_least_traffic_list_get   Get least traffic list        */
/*    _ux_hcd_ehci_poll_rate_entry_get      Get anchor for poll rate      */
/*    _ux_utility_physical_address          Get physical address          */
/*    _ux_host_mutex_on                     Get mutex                     */
/*    _ux_host_mutex_off                    Put mutex                     */
/*    _ux_hcd_ehci_periodic_descriptor_link Link/unlink descriptor        */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    EHCI Controller Driver                                              */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     Chaoqiong Xiao           Initial Version 6.0           */
/*  09-30-2020     Chaoqiong Xiao           Modified comment(s),          */
/*                                            resulting in version 6.1    */
/*  11-09-2020     Chaoqiong Xiao           Modified comment(s),          */
/*                                            fixed compile warnings,     */
/*                                            resulting in version 6.1.2  */
/*  04-02-2021     Chaoqiong Xiao           Modified comment(s),          */
/*                                            fixed compile issues with   */
/*                                            some macro options,         */
/*                                            filled max transfer length, */
/*                                            resulting in version 6.1.6  */
/*  04-25-2022     Chaoqiong Xiao           Modified comment(s),          */
/*                                            fixed standalone compile,   */
/*                                            resulting in version 6.1.11 */
/*                                                                        */
/**************************************************************************/
UINT  _ux_hcd_ehci_isochronous_endpoint_create(UX_HCD_EHCI *hcd_ehci, UX_ENDPOINT *endpoint)
{
#if UX_MAX_ISO_TD == 0

    UX_PARAMETER_NOT_USED(hcd_ehci);
    UX_PARAMETER_NOT_USED(endpoint);

    /* Error trap.  */
    _ux_system_error_handler(UX_SYSTEM_LEVEL_THREAD, UX_SYSTEM_CONTEXT_HCD, UX_FUNCTION_NOT_SUPPORTED);

    /* If trace is enabled, insert this event into the trace buffer.  */
    UX_TRACE_IN_LINE_INSERT(UX_TRACE_ERROR, UX_FUNCTION_NOT_SUPPORTED, 0, 0, 0, UX_TRACE_ERRORS, 0, 0)

    /* Not supported, return error.  */
    return(UX_FUNCTION_NOT_SUPPORTED);
#else

UX_DEVICE                       *device;
UX_EHCI_HSISO_ED                *ed;
UX_EHCI_PERIODIC_LINK_POINTER   itd;
UX_EHCI_ED                      *ed_list;
UX_EHCI_ED                      *ed_anchor;
UX_EHCI_PERIODIC_LINK_POINTER   lp;
UX_EHCI_POINTER                 bp;
UCHAR                           interval;
UCHAR                           interval_shift;
UINT                            poll_depth;
ULONG                           microframe_load[8];
#if defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
ULONG                           microframe_ssplit_count[8];
ULONG                           mask;
UINT                            split_count;
ULONG                           split_last_size;
#else
#define microframe_ssplit_count UX_NULL
#endif
ULONG                           microframe_i;
ULONG                           endpt;
ULONG                           device_address;
ULONG                           max_packet_size;
ULONG                           max_trans_size;
ULONG                           mult;
ULONG                           io;
UINT                            i;
UINT                            status;


    /* Get the pointer to the device.  */
    device = endpoint -> ux_endpoint_device;

    /* Get the interval value from the endpoint descriptor.  */
    interval = (UCHAR)endpoint -> ux_endpoint_descriptor.bInterval;

    /* For isochronous endpoints bInterval is 1 ~ 16 and means an interval of
       2^(bInterval-1) micro-frames.  */
    if (interval == 0)
        interval = 1;
    if (interval > 16)
        interval = 16;

    /* Interval shift is base 0.  */
    interval_shift = (UCHAR)(interval - 1);

    /* Keep interval as number of micro-frames.  */
    interval = (UCHAR)(1u << interval_shift);
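
    /* As a worked example of the conversion above: bInterval = 1 gives
       interval_shift = 0 and interval = 1 micro-frame (poll every 125 us),
       while bInterval = 4 gives interval_shift = 3 and interval = 8
       micro-frames (poll every 1 ms).  */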

    /* Get the max packet size.  */
    max_packet_size = endpoint -> ux_endpoint_descriptor.wMaxPacketSize & UX_MAX_PACKET_SIZE_MASK;

    /* Get the number of transactions per micro-frame.  */
    mult = endpoint -> ux_endpoint_descriptor.wMaxPacketSize & UX_MAX_NUMBER_OF_TRANSACTIONS_MASK;
    mult >>= UX_MAX_NUMBER_OF_TRANSACTIONS_SHIFT;
    if (mult < 3)
        mult ++;

    /* Get the max transfer size.  */
    max_trans_size = max_packet_size * mult;

    /* We need to take into account the nature of the HCD to define the max size
       of any transfer in the transfer request.  */
    endpoint -> ux_endpoint_transfer_request.ux_transfer_request_maximum_length = max_trans_size;
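
    /* For example, a high-bandwidth endpoint with wMaxPacketSize = 0x1400
       (1024-byte packets, two additional transaction opportunities) ends up
       with max_packet_size = 1024, mult = 3 and max_trans_size = 3072 bytes
       per scheduled micro-frame.  */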

    /* Get the Endpt, Device Address, I/O, Maximum Packet Size, Mult.  */
    endpt = (endpoint -> ux_endpoint_descriptor.bEndpointAddress << UX_EHCI_HSISO_ENDPT_SHIFT) & UX_EHCI_HSISO_ENDPT_MASK;
    device_address = device -> ux_device_address & UX_EHCI_HSISO_DEVICE_ADDRESS_MASK;
    io = (endpoint -> ux_endpoint_descriptor.bEndpointAddress & UX_ENDPOINT_DIRECTION) ? UX_EHCI_HSISO_DIRECTION_IN : UX_EHCI_HSISO_DIRECTION_OUT;
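
    /* For instance, bEndpointAddress = 0x81 (IN endpoint 1) selects
       UX_EHCI_HSISO_DIRECTION_IN for io, while 0x01 selects
       UX_EHCI_HSISO_DIRECTION_OUT; endpt and device_address are already
       shifted/masked into the positions used for BP0 below.  */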

    /* Only high speed transfers are supported unless split transfer is enabled.  */
    if (device -> ux_device_speed != UX_HIGH_SPEED_DEVICE)
    {
#if !defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
        return(UX_FUNCTION_NOT_SUPPORTED);
#else

        /* 1 ~ N siTDs ...  */
        /* OUT: only start-splits, no complete splits.  */
        /* IN : at most one start-split and one to N complete-splits.  */

        /* TBD.  */
#endif
    }
    else
    {

        /* Allocate memory for the ED.  */
        ed = (UX_EHCI_HSISO_ED *)_ux_utility_memory_allocate(UX_NO_ALIGN, UX_REGULAR_MEMORY, sizeof(UX_EHCI_HSISO_ED));
        if (ed == UX_NULL)
            return(UX_MEMORY_INSUFFICIENT);

        /* Obtain iTDs for this new endpoint.
        ** To keep buffer addressing independent and to simplify calculation,
        ** allocate one iTD for every two micro-frames:
        ** - interval 1 (0)  : 8 micro-frames, 4 iTDs
        ** - interval 2 (1)  : 4 micro-frames, 2 iTDs
        ** - interval 4 (2)  : 2 micro-frames, 1 iTD
        ** - interval >=8 (3): 1 micro-frame , 1 iTD
        ** The two micro-frames in an iTD use BP[3,4] and BP[5,6] so the page
        ** buffers do not merge with the iTD settings kept in BP[0..2].
        */

        /* Get the number of iTDs to allocate.  */
        if (interval > 2)
            ed -> ux_ehci_hsiso_ed_nb_tds = 1;
        else
            ed -> ux_ehci_hsiso_ed_nb_tds = (UCHAR)(4u >> interval_shift);

        /* Obtain iTDs.  */
        status = UX_SUCCESS;
        for (i = 0; i < ed -> ux_ehci_hsiso_ed_nb_tds; i ++)
        {

            /* Get a new free iTD.  */
            itd.itd_ptr = _ux_hcd_ehci_hsisochronous_td_obtain(hcd_ehci);
            if (itd.itd_ptr == UX_NULL)
            {
                status = UX_NO_TD_AVAILABLE;
                break;
            }

            /* Link to ED.  */
            itd.itd_ptr -> ux_ehci_hsiso_td_ed = ed;

            /* Save max transfer size.  */
            itd.itd_ptr -> ux_ehci_hsiso_td_max_trans_size = (USHORT)max_trans_size;

            /* Save the iTD for the micro-frame(s).  */
            ed -> ux_ehci_hsiso_ed_fr_td[i] = itd.itd_ptr;
        }

        /* If there is an error, free the allocated resources and return.  */
        if (status != UX_SUCCESS)
        {

            /* Release only the iTDs obtained so far (i is the failing index).  */
            while (i > 0)
            {
                i --;
                ed -> ux_ehci_hsiso_ed_fr_td[i] -> ux_ehci_hsiso_td_status = UX_UNUSED;
            }

            /* Free the ED container and report the error.  */
            _ux_utility_memory_free(ed);
            return(status);
        }

        /* Save information not related to periodic things.  */

        /* Save endpoint.  */
        ed -> ux_ehci_hsiso_ed_endpoint = endpoint;

        /* Save interval.  */
        ed -> ux_ehci_hsiso_ed_frinterval = interval;
        ed -> ux_ehci_hsiso_ed_frinterval_shift = interval_shift;

        /* Disable iTDs for now.  */
        ed -> ux_ehci_hsiso_ed_frstart = 0xFF;
    }

    /* Attach the first iTD as the endpoint container.  */
    endpoint -> ux_endpoint_ed = ed -> ux_ehci_hsiso_ed_fr_td[0];

    /* Match the interval for the endpoint to an EHCI list.
       We match anything that is > 32ms to the 32ms interval layer.
       The 32ms list is layer 0, the 16ms list is 1 ... the 1ms list is depth 5.  */

    /* Match > 32ms to 32ms list.  */
    /* Poll depth deeper, interval smaller.  */
    if (interval_shift < 3)
        poll_depth = 5;
    else if (interval_shift > 8)
        poll_depth = 0;
    else
        poll_depth = (UINT)(8u - interval_shift);
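
    /* For example, interval_shift = 3 (1ms) maps to poll_depth 5 and
       interval_shift = 5 (4ms) maps to poll_depth 3, matching the layer
       numbering described above (the 32ms list is depth 0).  */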

    /* Keep at most one frame (8 micro-frames) of interval for the
       micro-frame calculations below.  */
    if (interval_shift > 3)
        interval_shift = 3;
    interval = (UCHAR)(1u << interval_shift);

    /* Fill the iTD/siTD contents that are not related to the periodic list.
       Initialize the fields so they are ready for ZLPs if OUT.
       (For IN, would a zero buffer simply underrun?)  */
#if defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
    if (device -> ux_device_speed != UX_HIGH_SPEED_DEVICE)
    {

        /* TBD.  */
    }
    else
#endif
    {

        /* Prepare things not related to periodic things.  */

        for (i = 0; i < ed -> ux_ehci_hsiso_ed_nb_tds; i ++)
        {

            /* Get iTD.  */
            itd.itd_ptr = ed -> ux_ehci_hsiso_ed_fr_td[i];

            /* Build next link pointer, if not last one.  */
            if (i < ed -> ux_ehci_hsiso_ed_nb_tds - 1u)
            {
                lp.void_ptr = _ux_utility_physical_address(ed -> ux_ehci_hsiso_ed_fr_td[i + 1]);
                itd.itd_ptr -> ux_ehci_hsiso_td_next_lp = lp;
            }

            /* Build previous pointer, if not first one.  */
            if (i > 0)
            {
                itd.itd_ptr -> ux_ehci_hsiso_td_previous_lp.itd_ptr =
                                                ed -> ux_ehci_hsiso_ed_fr_td[i - 1];
            }

            /* Save Device Address and Endpt @ BP0.  */
            bp.value = device_address | endpt;
            itd.itd_ptr -> ux_ehci_hsiso_td_bp[0] = bp.void_ptr;

            /* Save I/O and max packet size @ BP1.  */
            bp.value = io | max_packet_size;
            itd.itd_ptr -> ux_ehci_hsiso_td_bp[1] = bp.void_ptr;

            /* Save Mult @ BP2.  */
            bp.value = mult;
            itd.itd_ptr -> ux_ehci_hsiso_td_bp[2] = bp.void_ptr;
        }

    }

    /* Lock the periodic list to update it.  */
    _ux_host_mutex_on(&hcd_ehci -> ux_hcd_ehci_periodic_mutex);

    /* Get the list index with the least traffic.  */
    ed_list = _ux_hcd_ehci_least_traffic_list_get(hcd_ehci, microframe_load, microframe_ssplit_count);

    /* Now we need to scan the list of EDs from the lowest load entry until we
       reach the node for the required poll rate. The depth index is the EHCI
       interval value and the first entry is pointed to by the ED list entry.  */
    ed_anchor = _ux_hcd_ehci_poll_rate_entry_get(hcd_ehci, ed_list, poll_depth);

    /* Calculate the bytes per scheduled micro-frame (packet size times number
       of transactions).  */
    max_packet_size *= mult;

    /* Go through the micro-frame transaction loads to find a start
       micro-frame index.  */
    for (microframe_i = 0; microframe_i < interval; microframe_i ++)
    {

        /* Skip if the load is too much.  */
        if (microframe_load[microframe_i] + max_packet_size > UX_MAX_BYTES_PER_MICROFRAME_HS)
            continue;

#if defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
        if (device -> ux_device_speed != UX_HIGH_SPEED_DEVICE)
        {

            /* Skip Y6 since the host must not use it.  */
            if (microframe_i == 6)
                continue;

            /* Skip if the start-split count already reaches 16.  */
            if (microframe_ssplit_count[microframe_i] >= 16)
                continue;
        }
#endif

        /* Use the load.  */
        break;
    }
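
    /* For example, with interval = 4 the candidate start slots are
       micro-frames 0..3; the first one whose current load plus this
       endpoint's bytes still fits within UX_MAX_BYTES_PER_MICROFRAME_HS
       is selected as microframe_i.  */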

    /* Sanity check: bandwidth is checked before endpoint creation so there
       should be no error, but check it anyway.  */
    if (microframe_i >= interval)
    {
        _ux_host_mutex_off(&hcd_ehci -> ux_hcd_ehci_periodic_mutex);
        for (i = 0; i < ed -> ux_ehci_hsiso_ed_nb_tds; i ++)
            ed -> ux_ehci_hsiso_ed_fr_td[i] -> ux_ehci_hsiso_td_status = UX_UNUSED;
        _ux_utility_memory_free(ed);
        return(UX_NO_BANDWIDTH_AVAILABLE);
    }

    /* The start micro-frame index is now calculated; fill the things related to the periodic list.  */
#if defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
    if (device -> ux_device_speed != UX_HIGH_SPEED_DEVICE)
    {
        /* OUT: each micro-frame is budgeted 188 data bytes (or the remaining data size),
                never a complete-split.
           IN : a complete-split must be scheduled for each following micro-frame.
                L - the last micro-frame in which a complete-split is scheduled.
                L < Y6, schedule additional complete-splits in micro-frames L+1 and L+2.
                L == Y6, schedule one complete-split in micro-frame Y7,
                         schedule one complete-split in micro-frame Y0 of the next frame,
                         unless the full speed transaction was budgeted to start in micro-frame Y0.
                L == Y7, schedule one complete-split in micro-frame Y0 of the next frame,
                         unless the full speed transaction was budgeted to start in micro-frame Y0.
        */

        /* Save anchor pointer.  */
        itd.sitd_ptr -> ux_ehci_fsiso_td_anchor = ed_anchor;

        /* No back pointer by default.  */
        lp.value = UX_EHCI_T;

        /* OUT or IN?  */
        if (io == 0)
        {

            /* Multiple start splits based on max packet size, no complete split.  */
            split_count = (max_packet_size + 187) / 188;
            split_last_size = max_packet_size % 188;

            mask = (UX_EHCI_SMASK_0 << split_count) - UX_EHCI_SMASK_0;
            mask <<= microframe_i;
            if (microframe_i + split_count > 8)
            {
                mask |= mask >> 8;
                mask &= UX_EHCI_SMASK_MASK;

                /* Need back pointer.  */
                lp = itd;
                lp.void_ptr = _ux_utility_physical_address(lp.void_ptr);
            }
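
            /* As a worked example: max_packet_size = 400 gives split_count = 3
               and split_last_size = 24, i.e. start-splits budgeted in three
               consecutive micro-frames carrying 188, 188 and 24 bytes; if they
               begin at micro-frame 6 the S-mask wraps into the next frame and
               the back pointer is set up above.  */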

            /* Save settings.  */
            itd.sitd_ptr -> ux_ehci_fsiso_td_cap1 = mask;
            itd.sitd_ptr -> ux_ehci_fsiso_td_back_pointer = lp.void_ptr;

            /* Update anchor micro-frame loads and start splits.  */
            for (i = 0; i < 8; i ++)
            {
                if ((mask & (UX_EHCI_SMASK_0 << i)) == 0)
                    continue;

                /* Add to load.  */
                if (split_last_size &&
                    i == ((microframe_i + split_count - 1) & 7))
                {
                    ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] = (USHORT)(ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] + split_last_size);
                }
                else
                    ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] = (USHORT)(ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] + 188u);

                /* Increment SSplit count.  */
                ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_ssplit_count[i] ++;
            }
        }
        else
        {

            /* Single start-split.  */
            itd.sitd_ptr -> ux_ehci_fsiso_td_cap1 = UX_EHCI_SMASK_0 << microframe_i;

            /* Multiple complete-splits, starting 2 micro-frames later, based on max packet size.  */
            split_count = (max_packet_size + 187) / 188;

            /* Add an extra 2 at the end.  */
            split_count += 2;
            mask = (UX_EHCI_CMASK_0 << split_count) - UX_EHCI_CMASK_0;
            mask <<= microframe_i + 2;
            if (microframe_i + 2 + split_count > 8)
            {
                mask |= mask >> 8;
                mask &= UX_EHCI_CMASK_MASK;

                /* Need back pointer.  */
                lp = itd;
                lp.void_ptr = _ux_utility_physical_address(lp.void_ptr);
            }
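
            /* As a worked example: max_packet_size = 200 gives split_count = 2 + 2 = 4,
               so with microframe_i = 0 the complete-splits land in micro-frames
               2, 3, 4 and 5 after the single start-split in micro-frame 0.  */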

            /* If Y0 already has the budget, clear its complete-split mask bit.  */
            if (microframe_i == 7)
            {
                if (mask & UX_EHCI_CMASK_0)
                {
                    mask &= ~UX_EHCI_CMASK_0;
                    split_count --;
                }
            }

            /* Save settings.  */
            itd.sitd_ptr -> ux_ehci_fsiso_td_cap1 |= mask;
            itd.sitd_ptr -> ux_ehci_fsiso_td_back_pointer = lp.void_ptr;

            /* Update anchor micro-frame loads and complete splits.  */
            for (i = 0; i < 8; i ++)
            {
                if ((mask & (UX_EHCI_CMASK_0 << i)) == 0)
                    continue;

                /* Add to load.  */
                ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] = (USHORT)(ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] + 188u);
            }

            /* Increment SSplit count for the start-split micro-frame.  */
            ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_ssplit_count[microframe_i] ++;
        }

    }
    else
#endif
    {

        /* Save the index base of the allocated micro-frame.  */
        ed -> ux_ehci_hsiso_ed_frindex = (UCHAR)microframe_i;

        /* Save the anchor pointer.  */
        ed -> ux_ehci_hsiso_ed_anchor = ed_anchor;

        /* Update micro-frames.  */
        for (i = microframe_i; i < 8; i += interval)
        {

            /* Update anchor micro-frame loads.  */
            ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] = (USHORT)(ed_anchor -> REF_AS.ANCHOR.ux_ehci_ed_microframe_load[i] + max_packet_size);

            /* Initialize the control word with PG selecting BP 3 or BP 5.  */
            itd.itd_ptr = ed -> ux_ehci_hsiso_ed_fr_td[i >> 1];

            /* Buffer in pages 3,4 or 5,6 to avoid merging settings.  */
            if (i & 1u)
                itd.itd_ptr -> ux_ehci_hsiso_td_control[i] = UX_EHCI_HSISO_IOC |
                                                    (5 << UX_EHCI_HSISO_PG_SHIFT);
            else
                itd.itd_ptr -> ux_ehci_hsiso_td_control[i] = UX_EHCI_HSISO_IOC |
                                                    (3 << UX_EHCI_HSISO_PG_SHIFT);
        }
    }
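
    /* For example, with interval = 1 every micro-frame 0..7 is used: the even
       micro-frames are set up with PG = 3 (buffer in BP3/BP4) and the odd ones
       with PG = 5 (buffer in BP5/BP6), each pair being served by one of the
       four iTDs allocated above.  */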

    /* Link the iTDs to the periodic list.  */

    /* Physical LP for the anchor (Typ iTD, 0).  */
    lp.void_ptr = _ux_utility_physical_address(ed -> ux_ehci_hsiso_ed_fr_td[0]);

    /* Link to the periodic list.  */
    ed -> ux_ehci_hsiso_ed_fr_td[0] -> ux_ehci_hsiso_td_previous_lp.ed_ptr = ed_anchor;
    _ux_hcd_ehci_periodic_descriptor_link(ed_anchor, lp.void_ptr,
                            ed -> ux_ehci_hsiso_ed_fr_td[ed -> ux_ehci_hsiso_ed_nb_tds - 1],
                            ed_anchor -> ux_ehci_ed_queue_head);

    /* Simply insert iTD[0]/siTD[0] at the head of the scan list.  */
#if defined(UX_HCD_EHCI_SPLIT_TRANSFER_ENABLE)
    if (device -> ux_device_speed != UX_HIGH_SPEED_DEVICE)
    {
        itd.sitd_ptr -> ux_ehci_fsiso_td_next_scan_td = hcd_ehci -> ux_hcd_ehci_fsiso_scan_list;
        hcd_ehci -> ux_hcd_ehci_fsiso_scan_list = itd.sitd_ptr;
    }
    else
#endif
    {
        itd.itd_ptr = ed -> ux_ehci_hsiso_ed_fr_td[0];
        itd.itd_ptr -> ux_ehci_hsiso_td_next_scan_td =
                                    hcd_ehci -> ux_hcd_ehci_hsiso_scan_list;
        hcd_ehci -> ux_hcd_ehci_hsiso_scan_list = itd.itd_ptr;
        if (itd.itd_ptr -> ux_ehci_hsiso_td_next_scan_td)
            itd.itd_ptr -> ux_ehci_hsiso_td_next_scan_td -> ux_ehci_hsiso_td_previous_scan_td =
                                    itd.itd_ptr;
    }

    /* Release the periodic table.  */
    _ux_host_mutex_off(&hcd_ehci -> ux_hcd_ehci_periodic_mutex);

    /* Return successful completion.  */
    return(UX_SUCCESS);
#endif
}