1 /* USER CODE BEGIN Header */
2 /**
3 ******************************************************************************
  * @file    transport_layer.c
5 * @author GPM WBL Application Team
6 * @brief Transport layer file
7 ******************************************************************************
8 * @attention
9 *
10 * Copyright (c) 2024 STMicroelectronics.
11 * All rights reserved.
12 *
13 * This software is licensed under terms that can be found in the LICENSE file
14 * in the root directory of this software component.
15 * If no LICENSE file comes with this software, it is provided AS-IS.
16 *
17 ******************************************************************************
18 */
19 /* USER CODE END Header */
20
21 /* Includes ------------------------------------------------------------------*/
22 #include "stm32wb0x.h"
23 #include "fifo.h"
24 #include "compiler.h"
25 #include "transport_layer.h"
26 #include "dtm_cmd_db.h"
27 #include "stm32wb0x_ll_usart.h"
28 #include "stm32wb0x_hal_dma.h"
29 #include "stm32wb0x_hal_uart.h"
30 #include "hci_parser.h"
31 #include "dtm_preprocess_events.h"
32 #include "app_common.h"
33
/* Private typedef -----------------------------------------------------------*/
35
/* Bookkeeping for events dropped on event-queue overflow. PACKED so that
   event_register (a pre-built 5-byte HCI event header, filled in
   transport_layer_init) and event_lost_code (a 64-bit mask, one bit per
   overflow_index — see enqueue_event) are contiguous and can be enqueued
   as a single 13-byte packet (see transport_layer_tick). */
typedef PACKED(struct) event_lost_register_s {
  uint8_t event_lost;          /* 1 when at least one event has been dropped */
  uint8_t event_register[5];   /* pre-built "event lost" HCI event header */
  uint64_t event_lost_code;    /* bitmask of dropped-event indices */
} event_lost_register_t;
41
42 /* Private define ------------------------------------------------------------*/
43
44 #define MAX_EVENT_SIZE (536)
45
46 #if defined(STM32WB07) || defined(STM32WB06) || defined(STM32WB09)
47 #define COMMAND_BUFFER_SIZE (536 + 4)
48 #define EVENT_BUFFER_SIZE 2300
49 #elif defined(STM32WB05)
50 #define COMMAND_BUFFER_SIZE 265 /* Decrease buffer size for reducing RAM footprint */
51 #define EVENT_BUFFER_SIZE 1024
52 #endif
53
54 #define FIFO_ALIGNMENT 4
55 #define FIFO_VAR_LEN_ITEM_MAX_SIZE (MAX_EVENT_SIZE)
56
57 #define DMA_CH_UART_TX LL_DMA_CHANNEL_1
58 #define DMA_CH_UART_RX LL_DMA_CHANNEL_2
59
60 #define HCI_RESET_OPCODE 0x0C03
61
62 #define LEGACY_ADV_OPCODE_LOW 0x2006 // Lowest opcode for legacy advertising commands
63 #define LEGACY_ADV_OPCODE_HIGH 0x200D // Highest opcode for legacy advertising commands
64
65 #define EXTENDED_ADV_OPCODE_LOW 0x2036 // Lowest opcode for extended advertising commands
66 #define EXTENDED_ADV_OPCODE_HIGH 0x204A // Highest opcode for extended advertising commands
67
68 // Opcodes of commands that returns command status instead of command complete events
69 #define HCI_LE_CREATE_CONNECTION_OPCODE 0x200D
70 #define HCI_LE_EXTENDED_CREATE_CONNECTION_OPCODE 0x2043
71 #define HCI_LE_PERIODIC_ADVERTISING_CREATE_SYNC_OPCODE 0x2044
72
73 /* Private macro -------------------------------------------------------------*/
74 /* Private variables ---------------------------------------------------------*/
75 #ifndef __ZEPHYR__
76 ALIGN(2) static uint8_t event_buffer[EVENT_BUFFER_SIZE + FIFO_VAR_LEN_ITEM_MAX_SIZE];
77 static uint8_t command_buffer[COMMAND_BUFFER_SIZE];
78 static circular_fifo_t event_fifo, command_fifo;
79 static uint8_t DMA_RX_Buffer[DMA_RX_BUFFER_SIZE];
80
81 static event_lost_register_t event_lost_register;
82 static uint8_t dma_state = DMA_IDLE;
83
84 #ifdef DEBUG_DTM
85 DebugLabel debug_buf[DEBUG_ARRAY_LEN] = {EMPTY,};
86 uint32_t debug_cnt = 0;
87 #endif
88
89 /* External variables --------------------------------------------------------*/
90
91 extern UART_HandleTypeDef huart1;
92 extern DMA_HandleTypeDef handle_DMA_Channel2;
93
94 /* Private function prototypes -----------------------------------------------*/
95
96 static void enqueue_event(circular_fifo_t *fifo, uint16_t buff_len1, const uint8_t *buff_evt1, uint16_t buff_len2, const uint8_t *buff_evt2, int8_t overflow_index);
97
98 /* Private functions ---------------------------------------------------------*/
99
100 /* "If, since the last power-on or reset, the Host has ever issued a legacy
101 advertising command and then issues an extended advertising command, or
102 has ever issued an extended advertising command and then issues a legacy
103 advertising command, the Controller shall return the error code Command
104 Disallowed (0x0C)."
105 This function returns 1 if an error has to be given. */
check_legacy_extended_call(uint16_t opcode,uint8_t * buffer_out)106 static uint8_t check_legacy_extended_call(uint16_t opcode, uint8_t *buffer_out)
107 {
108 static uint8_t legacy_cmd_issued = FALSE, extended_cmd_issued = FALSE;
109 uint8_t allowed = TRUE;
110
111 if(opcode >= LEGACY_ADV_OPCODE_LOW && opcode <= LEGACY_ADV_OPCODE_HIGH){
112 if(extended_cmd_issued)
113 allowed = FALSE; // Error
114 else {
115 legacy_cmd_issued = TRUE;
116 allowed = TRUE; // OK
117 }
118 }
119 else if(opcode >= EXTENDED_ADV_OPCODE_LOW && opcode <= EXTENDED_ADV_OPCODE_HIGH){
120 if(legacy_cmd_issued)
121 allowed = FALSE; // Error
122 else {
123 extended_cmd_issued = TRUE;
124 allowed = TRUE; // OK
125 }
126 }
127
128 if(!allowed){
129 if(opcode == HCI_LE_CREATE_CONNECTION_OPCODE ||
130 opcode == HCI_LE_EXTENDED_CREATE_CONNECTION_OPCODE||
131 opcode == HCI_LE_PERIODIC_ADVERTISING_CREATE_SYNC_OPCODE){
132 buffer_out[0] = 0x04;
133 buffer_out[1] = 0x0F;
134 buffer_out[2] = 0x04;
135 buffer_out[3] = 0x0C;
136 buffer_out[4] = 0x01;
137 HOST_TO_LE_16(buffer_out+5,opcode);
138 return 7;
139 }
140 else {
141 buffer_out[0] = 0x04;
142 buffer_out[1] = 0x0E;
143 buffer_out[2] = 0x04;
144 buffer_out[3] = 0x01;
145 HOST_TO_LE_16(buffer_out+4,opcode);
146 buffer_out[6] = 0x0C;
147 return 7;
148 }
149 }
150
151 return 0;
152 }
153
154 /* Process Commands */
process_command(uint16_t op_code,uint8_t * buffer_in,uint16_t buffer_in_length,uint8_t * buffer_out,uint16_t buffer_out_max_length)155 uint16_t process_command(uint16_t op_code, uint8_t *buffer_in, uint16_t buffer_in_length, uint8_t *buffer_out, uint16_t buffer_out_max_length)
156 {
157 uint32_t i;
158 uint16_t ret_val;
159
160 ret_val = check_legacy_extended_call(op_code, buffer_out);
161 if(ret_val != 0){
162 return ret_val;
163 }
164
165 for (i = 0; hci_command_table[i].opcode != 0; i++) {
166 if (op_code == hci_command_table[i].opcode) {
167 ret_val = hci_command_table[i].execute(buffer_in, buffer_in_length, buffer_out, buffer_out_max_length);
168 /* add get crash handler */
169 return ret_val;
170 }
171 }
172
173 // Unknown command length
174 buffer_out[0] = 0x04;
175 buffer_out[1] = 0x0F;
176 buffer_out[2] = 0x04;
177 buffer_out[3] = 0x01;
178 buffer_out[4] = 0x01;
179 HOST_TO_LE_16(buffer_out+5, op_code);
180 return 7;
181 }
182
183 /**
184 * @brief Transport Layer Init.
185 * Init the transport layer.
186 * @param None
187 * @retval None
188 */
transport_layer_init(void)189 void transport_layer_init(void)
190 {
191 /* Queue index init */
192 fifo_init(&event_fifo, EVENT_BUFFER_SIZE, event_buffer, FIFO_ALIGNMENT);
193 fifo_init(&command_fifo, COMMAND_BUFFER_SIZE, command_buffer, FIFO_ALIGNMENT);
194
195 /* event_lost_register init */
196 event_lost_register.event_lost = 0;
197 event_lost_register.event_register[0] = 0x04;
198 event_lost_register.event_register[1] = 0xFF;
199 event_lost_register.event_register[2] = 0x0A;
200 event_lost_register.event_register[3] = 0x02;
201 event_lost_register.event_register[4] = 0x00;
202 event_lost_register.event_lost_code = 0;
203
204 HAL_UARTEx_ReceiveToIdle_DMA(&huart1, DMA_RX_Buffer, DMA_RX_BUFFER_SIZE);
205 }
206
transport_layer_send_data(uint8_t * data,uint16_t data_length)207 static void transport_layer_send_data(uint8_t *data, uint16_t data_length)
208 {
209 if (dma_state == DMA_IDLE) {
210 dma_state = DMA_IN_PROGRESS;
211 DEBUG_NOTES(DMA_REARM);
212 HAL_UART_Transmit_DMA(&huart1, data, data_length);
213 }
214 }
215
transport_layer_DMA_RX_Data(uint16_t dma_counter)216 static void transport_layer_DMA_RX_Data(uint16_t dma_counter)
217 {
218 static uint16_t rx_index = 0;
219
220 if(rx_index != dma_counter)
221 {
222 if(dma_counter > rx_index)
223 {
224 hci_input(&DMA_RX_Buffer[rx_index], dma_counter - rx_index);
225 }
226 else
227 {
228 hci_input(&DMA_RX_Buffer[rx_index], DMA_RX_BUFFER_SIZE - rx_index);
229 if(dma_counter != 0)
230 {
231 hci_input(&DMA_RX_Buffer[0], dma_counter);
232 }
233 }
234 rx_index = dma_counter;
235 if(rx_index == DMA_RX_BUFFER_SIZE)
236 {
237 rx_index = 0;
238 }
239 }
240 }
241
/**
  * @brief  Advance transport layer state machine: push one queued event to
  *         the UART (if the DMA is free), execute one pending HCI command,
  *         and emit the "event lost" notification if needed.
  * @param  None
  * @retval Desired sleep level
  */
void transport_layer_tick(void)
{
  uint8_t buffer[COMMAND_BUFFER_SIZE], buffer_out[FIFO_VAR_LEN_ITEM_MAX_SIZE];
  uint16_t len;
  uint16_t size = 0;

  /* Event queue: start transmitting the oldest queued event, skipping the
     FIFO's FIFO_ALIGNMENT-byte item header. */
  if ((fifo_size(&event_fifo) > 0) && (dma_state == DMA_IDLE)) {
    uint8_t *ptr;
    DEBUG_NOTES(SEND_DATA);
    if (fifo_get_ptr_var_len_item(&event_fifo, &size, &ptr) == 0) {
      transport_layer_send_data(ptr+FIFO_ALIGNMENT, size);
    }
  }

  /* Command FIFO: extract and execute the oldest pending HCI command. */
  if (fifo_size(&command_fifo) > 0) {
    uint16_t opcode;
    uint8_t offset;

    fifo_get_var_len_item(&command_fifo, &size, buffer);
    /* Parse the packet header to get the opcode and the payload offset. */
    if(buffer[0] == HCI_COMMAND_PKT){
      hci_cmd_hdr *hdr = (hci_cmd_hdr *)buffer;
      opcode = hdr->opcode;
      offset = sizeof(hci_cmd_hdr);
    }
    else if(buffer[0] == HCI_COMMAND_EXT_PKT){
      hci_cmd_ext_hdr *hdr = (hci_cmd_ext_hdr *)buffer;
      opcode = hdr->opcode;
      offset = sizeof(hci_cmd_ext_hdr);
    }
    else {
      /* Unknown packet type: drop everything in the command FIFO. */
      fifo_flush(&command_fifo);
      return;
    }

    if(opcode == HCI_RESET_OPCODE)
    {
      /* Drain all in-flight UART traffic before resetting: wait for the
         DMA transfer, then for TX data-register empty, then for transfer
         complete, so no response bytes are lost across the reset. */
      while(dma_state == DMA_IN_PROGRESS);
      while(LL_USART_IsActiveFlag_TXE_TXFNF(USART1) == RESET);
      while(LL_USART_IsActiveFlag_TC(USART1) == RESET);
      TL_ResetReqCallback();
    }

    len=process_command(opcode, buffer+offset, size-offset, buffer_out, sizeof(buffer_out));

#if (BUFFER_CMDS_ON_BUSY == 1)
    /* Locate the status byte in the response event. */
    uint8_t status_offset = (buffer_out[1] == 0x0E) ? 6 : 3; /* 0x0E: command complete, 0x0F: command status */
    /* Apply command buffering in case of CONTROLLER BUSY error with the exception of the
     * aci_l2cap_connection_parameter_update_resp command (see req_pbs #990070)
     */
    if ((*(buffer_out+status_offset) != BLE_ERROR_CONTROLLER_BUSY) || (opcode == 0xfd82))
    {
      DEBUG_NOTES(COMMAND_PROCESSED);
      /* Set user events back to normal queue */
      send_event(buffer_out, len, 1);
      fifo_flush(&command_fifo);
    }
    else
    {
      /* Controller busy: put the command back so it is retried later. */
      fifo_roll_back(&command_fifo, size);
    }
#else
    DEBUG_NOTES(COMMAND_PROCESSED);
    /* Set user events back to normal queue */
    send_event(buffer_out, len, 1);
    fifo_flush(&command_fifo);
#endif
  }

  /* If events were dropped on overflow, try to enqueue the "event lost"
     notification: 13 bytes = 5-byte header + 8-byte lost-event mask,
     contiguous thanks to the PACKED event_lost_register_t layout. */
  if(event_lost_register.event_lost==1) {
    if (fifo_put_var_len_item(&event_fifo, 13, event_lost_register.event_register,0,NULL) == 0) {
      event_lost_register.event_lost = 0;
      event_lost_register.event_lost_code = 0;
    }
  }
}
326
command_received(uint8_t * cmd,uint16_t len)327 void command_received(uint8_t *cmd, uint16_t len)
328 {
329 fifo_put_var_len_item(&command_fifo, len, cmd, 0, NULL);
330 TL_ProcessReqCallback();
331 }
332
enqueue_event(circular_fifo_t * fifo,uint16_t buff_len1,const uint8_t * buff_evt1,uint16_t buff_len2,const uint8_t * buff_evt2,int8_t overflow_index)333 void enqueue_event(circular_fifo_t *fifo, uint16_t buff_len1, const uint8_t *buff_evt1, uint16_t buff_len2, const uint8_t *buff_evt2, int8_t overflow_index)
334 {
335 if (fifo_put_var_len_item(fifo, buff_len1, buff_evt1, buff_len2, buff_evt2) != 0) {
336 /* Event queue overflow */
337 if ((overflow_index >=0) && (overflow_index < 64)) {
338 event_lost_register.event_lost = 1;
339 event_lost_register.event_lost_code |= (1 << overflow_index);
340 } else {
341 /* assert */
342 }
343 }
344 TL_ProcessReqCallback();
345 }
346
send_event(const uint8_t * buffer_out,uint16_t buffer_out_length,int8_t overflow_index)347 void send_event(const uint8_t *buffer_out, uint16_t buffer_out_length, int8_t overflow_index)
348 {
349 if(buffer_out_length != 0) {
350 DEBUG_NOTES(ENQUEUE_EVENT);
351 enqueue_event(&event_fifo, buffer_out_length, buffer_out, 0, NULL, overflow_index);
352 }
353 }
354
send_event_2buffers(const uint8_t * buffer_out1,uint16_t buffer_out_length1,const uint8_t * buffer_out2,uint16_t buffer_out_length2,int8_t overflow_index)355 void send_event_2buffers(const uint8_t *buffer_out1, uint16_t buffer_out_length1, const uint8_t *buffer_out2, uint16_t buffer_out_length2, int8_t overflow_index)
356 {
357 if(buffer_out_length1 != 0) {
358 DEBUG_NOTES(ENQUEUE_EVENT);
359 enqueue_event(&event_fifo, buffer_out_length1, buffer_out1, buffer_out_length2, buffer_out2, overflow_index);
360 }
361 }
362
advance_dma(void)363 void advance_dma(void)
364 {
365 uint8_t *ptr;
366 uint16_t size;
367 fifo_discard_var_len_item(&event_fifo);
368
369 if (fifo_size(&event_fifo) > 0) {
370 if (fifo_get_ptr_var_len_item(&event_fifo, &size, &ptr) == 0) {
371 transport_layer_send_data(ptr+FIFO_ALIGNMENT, size);
372 }
373 }
374 }
375
HAL_UART_TxCpltCallback(UART_HandleTypeDef * huart)376 void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart)
377 {
378 /* DMA1 finished the transfer of SrcBuffer */
379 dma_state = DMA_IDLE;
380
381 DEBUG_NOTES(DMA_TC);
382
383 advance_dma();
384 }
385
HAL_UARTEx_RxEventCallback(UART_HandleTypeDef * huart,uint16_t Size)386 void HAL_UARTEx_RxEventCallback(UART_HandleTypeDef *huart, uint16_t Size)
387 {
388 transport_layer_DMA_RX_Data(Size);
389 }
390
TL_ProcessReqCallback(void)391 __weak void TL_ProcessReqCallback(void){}
392
TL_ResetReqCallback(void)393 __weak void TL_ResetReqCallback(void){}
394
395 #endif /* __ZEPHYR__ */
396
BLE_STACK_Event(hci_pckt * hci_pckt,uint16_t length)397 void BLE_STACK_Event(hci_pckt *hci_pckt, uint16_t length)
398 {
399 uint16_t i;
400 int ret = 0;
401
402 if(hci_pckt->type == HCI_EVENT_PKT)
403 {
404 hci_event_pckt *event_pckt = (hci_event_pckt*)hci_pckt->data;
405
406 if(event_pckt->evt == EVT_LE_META_EVENT)
407 {
408 evt_le_meta_event *evt = (evt_le_meta_event *)event_pckt->data;
409
410 for (i = 0; hci_le_meta_events_table[i].evt_code != 0; i++)
411 {
412 if (evt->subevent == hci_le_meta_events_table[i].evt_code)
413 {
414 ret = hci_le_meta_events_table[i].process((void *)evt->data);
415 goto send_event;
416 }
417 }
418 }
419 else if(event_pckt->evt != HCI_VENDOR_PKT)
420 {
421 for (i = 0; hci_events_table[i].evt_code != 0; i++)
422 {
423 if (event_pckt->evt == hci_events_table[i].evt_code)
424 {
425 ret = hci_events_table[i].process(event_pckt->data);
426 goto send_event;
427 }
428 }
429 }
430 }
431 if(hci_pckt->type == HCI_EVENT_PKT || hci_pckt->type == HCI_EVENT_EXT_PKT)
432 {
433 uint8_t evt_code;
434 uint8_t *evt_payload;
435
436 if(hci_pckt->type == HCI_EVENT_PKT)
437 {
438 hci_event_pckt *event_pckt = (hci_event_pckt*)hci_pckt->data;
439 evt_code = event_pckt->evt;
440 evt_payload = event_pckt->data;
441 }
442 else
443 {
444 hci_event_ext_pckt *event_pckt = (hci_event_ext_pckt*)hci_pckt->data;
445 evt_code = event_pckt->evt;
446 evt_payload = event_pckt->data;
447 }
448
449 if(evt_code == HCI_VENDOR_PKT)
450 {
451 evt_blue_aci *blue_evt = (evt_blue_aci *)evt_payload;
452
453 for (i = 0; hci_vendor_specific_events_table[i].evt_code != 0; i++)
454 {
455 if (blue_evt->ecode == hci_vendor_specific_events_table[i].evt_code)
456 {
457 ret = hci_vendor_specific_events_table[i].process((void *)blue_evt->data);
458 break;
459 }
460 }
461 }
462 }
463
464 send_event:
465
466 if(ret == 0)
467 {
468 send_event((uint8_t *)hci_pckt, length, -1);
469 }
470 }
471
472